]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-3.0-3.12.6-201312251834.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-3.0-3.12.6-201312251834.patch
1 .|,
2 -*-
3 '/'\`
4 /`'o\
5 /#,o'`\
6 o/`"#,`\o
7 /`o``"#,\
8 o/#,`'o'`\o
9 /o`"#,`',o\
10 o`-._`"#_.-'o
11 _|"|_
12 \=%=/ hjw
13 """
14 diff --git a/Documentation/dontdiff b/Documentation/dontdiff
15 index b89a739..79768fb 100644
16 --- a/Documentation/dontdiff
17 +++ b/Documentation/dontdiff
18 @@ -2,9 +2,11 @@
19 *.aux
20 *.bin
21 *.bz2
22 +*.c.[012]*.*
23 *.cis
24 *.cpio
25 *.csp
26 +*.dbg
27 *.dsp
28 *.dvi
29 *.elf
30 @@ -14,6 +16,7 @@
31 *.gcov
32 *.gen.S
33 *.gif
34 +*.gmo
35 *.grep
36 *.grp
37 *.gz
38 @@ -48,14 +51,17 @@
39 *.tab.h
40 *.tex
41 *.ver
42 +*.vim
43 *.xml
44 *.xz
45 *_MODULES
46 +*_reg_safe.h
47 *_vga16.c
48 *~
49 \#*#
50 *.9
51 -.*
52 +.[^g]*
53 +.gen*
54 .*.d
55 .mm
56 53c700_d.h
57 @@ -69,9 +75,11 @@ Image
58 Module.markers
59 Module.symvers
60 PENDING
61 +PERF*
62 SCCS
63 System.map*
64 TAGS
65 +TRACEEVENT-CFLAGS
66 aconf
67 af_names.h
68 aic7*reg.h*
69 @@ -80,6 +88,7 @@ aic7*seq.h*
70 aicasm
71 aicdb.h*
72 altivec*.c
73 +ashldi3.S
74 asm-offsets.h
75 asm_offsets.h
76 autoconf.h*
77 @@ -92,19 +101,24 @@ bounds.h
78 bsetup
79 btfixupprep
80 build
81 +builtin-policy.h
82 bvmlinux
83 bzImage*
84 capability_names.h
85 capflags.c
86 classlist.h*
87 +clut_vga16.c
88 +common-cmds.h
89 comp*.log
90 compile.h*
91 conf
92 config
93 config-*
94 config_data.h*
95 +config.c
96 config.mak
97 config.mak.autogen
98 +config.tmp
99 conmakehash
100 consolemap_deftbl.c*
101 cpustr.h
102 @@ -115,9 +129,11 @@ devlist.h*
103 dnotify_test
104 docproc
105 dslm
106 +dtc-lexer.lex.c
107 elf2ecoff
108 elfconfig.h*
109 evergreen_reg_safe.h
110 +exception_policy.conf
111 fixdep
112 flask.h
113 fore200e_mkfirm
114 @@ -125,12 +141,15 @@ fore200e_pca_fw.c*
115 gconf
116 gconf.glade.h
117 gen-devlist
118 +gen-kdb_cmds.c
119 gen_crc32table
120 gen_init_cpio
121 generated
122 genheaders
123 genksyms
124 *_gray256.c
125 +hash
126 +hid-example
127 hpet_example
128 hugepage-mmap
129 hugepage-shm
130 @@ -145,14 +164,14 @@ int32.c
131 int4.c
132 int8.c
133 kallsyms
134 -kconfig
135 +kern_constants.h
136 keywords.c
137 ksym.c*
138 ksym.h*
139 kxgettext
140 lex.c
141 lex.*.c
142 -linux
143 +lib1funcs.S
144 logo_*.c
145 logo_*_clut224.c
146 logo_*_mono.c
147 @@ -162,14 +181,15 @@ mach-types.h
148 machtypes.h
149 map
150 map_hugetlb
151 -media
152 mconf
153 +mdp
154 miboot*
155 mk_elfconfig
156 mkboot
157 mkbugboot
158 mkcpustr
159 mkdep
160 +mkpiggy
161 mkprep
162 mkregtable
163 mktables
164 @@ -185,6 +205,8 @@ oui.c*
165 page-types
166 parse.c
167 parse.h
168 +parse-events*
169 +pasyms.h
170 patches*
171 pca200e.bin
172 pca200e_ecd.bin2
173 @@ -194,6 +216,7 @@ perf-archive
174 piggyback
175 piggy.gzip
176 piggy.S
177 +pmu-*
178 pnmtologo
179 ppc_defs.h*
180 pss_boot.h
181 @@ -203,7 +226,10 @@ r200_reg_safe.h
182 r300_reg_safe.h
183 r420_reg_safe.h
184 r600_reg_safe.h
185 +realmode.lds
186 +realmode.relocs
187 recordmcount
188 +regdb.c
189 relocs
190 rlim_names.h
191 rn50_reg_safe.h
192 @@ -213,8 +239,12 @@ series
193 setup
194 setup.bin
195 setup.elf
196 +signing_key*
197 +size_overflow_hash.h
198 sImage
199 +slabinfo
200 sm_tbl*
201 +sortextable
202 split-include
203 syscalltab.h
204 tables.c
205 @@ -224,6 +254,7 @@ tftpboot.img
206 timeconst.h
207 times.h*
208 trix_boot.h
209 +user_constants.h
210 utsrelease.h*
211 vdso-syms.lds
212 vdso.lds
213 @@ -235,13 +266,17 @@ vdso32.lds
214 vdso32.so.dbg
215 vdso64.lds
216 vdso64.so.dbg
217 +vdsox32.lds
218 +vdsox32-syms.lds
219 version.h*
220 vmImage
221 vmlinux
222 vmlinux-*
223 vmlinux.aout
224 vmlinux.bin.all
225 +vmlinux.bin.bz2
226 vmlinux.lds
227 +vmlinux.relocs
228 vmlinuz
229 voffset.h
230 vsyscall.lds
231 @@ -249,9 +284,12 @@ vsyscall_32.lds
232 wanxlfw.inc
233 uImage
234 unifdef
235 +utsrelease.h
236 wakeup.bin
237 wakeup.elf
238 wakeup.lds
239 +x509*
240 zImage*
241 zconf.hash.c
242 +zconf.lex.c
243 zoffset.h
244 diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
245 index fcbb736..5508d8c 100644
246 --- a/Documentation/kernel-parameters.txt
247 +++ b/Documentation/kernel-parameters.txt
248 @@ -1031,6 +1031,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
249 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
250 Default: 1024
251
252 + grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
253 + ignore grsecurity's /proc restrictions
254 +
255 +
256 hashdist= [KNL,NUMA] Large hashes allocated during boot
257 are distributed across NUMA nodes. Defaults on
258 for 64-bit NUMA, off otherwise.
259 @@ -1999,6 +2003,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
260 noexec=on: enable non-executable mappings (default)
261 noexec=off: disable non-executable mappings
262
263 + nopcid [X86-64]
264 + Disable PCID (Process-Context IDentifier) even if it
265 + is supported by the processor.
266 +
267 nosmap [X86]
268 Disable SMAP (Supervisor Mode Access Prevention)
269 even if it is supported by processor.
270 @@ -2266,6 +2274,25 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
271 the specified number of seconds. This is to be used if
272 your oopses keep scrolling off the screen.
273
274 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
275 + virtualization environments that don't cope well with the
276 + expand down segment used by UDEREF on X86-32 or the frequent
277 + page table updates on X86-64.
278 +
279 + pax_sanitize_slab=
280 + 0/1 to disable/enable slab object sanitization (enabled by
281 + default).
282 +
283 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
284 +
285 + pax_extra_latent_entropy
286 + Enable a very simple form of latent entropy extraction
287 + from the first 4GB of memory as the bootmem allocator
288 + passes the memory pages to the buddy allocator.
289 +
290 + pax_weakuderef [X86-64] enables the weaker but faster form of UDEREF
291 + when the processor supports PCID.
292 +
293 pcbit= [HW,ISDN]
294
295 pcd. [PARIDE]
296 diff --git a/Makefile b/Makefile
297 index 2b23383..a66cff0 100644
298 --- a/Makefile
299 +++ b/Makefile
300 @@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
301
302 HOSTCC = gcc
303 HOSTCXX = g++
304 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
305 -HOSTCXXFLAGS = -O2
306 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
307 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
308 +HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
309
310 # Decide whether to build built-in, modular, or both.
311 # Normally, just do built-in.
312 @@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
313 # Rules shared between *config targets and build targets
314
315 # Basic helpers built in scripts/
316 -PHONY += scripts_basic
317 -scripts_basic:
318 +PHONY += scripts_basic gcc-plugins
319 +scripts_basic: gcc-plugins
320 $(Q)$(MAKE) $(build)=scripts/basic
321 $(Q)rm -f .tmp_quiet_recordmcount
322
323 @@ -576,6 +577,65 @@ else
324 KBUILD_CFLAGS += -O2
325 endif
326
327 +ifndef DISABLE_PAX_PLUGINS
328 +ifeq ($(call cc-ifversion, -ge, 0408, y), y)
329 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
330 +else
331 +PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
332 +endif
333 +ifneq ($(PLUGINCC),)
334 +ifdef CONFIG_PAX_CONSTIFY_PLUGIN
335 +CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
336 +endif
337 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
338 +STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
339 +STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
340 +endif
341 +ifdef CONFIG_KALLOCSTAT_PLUGIN
342 +KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
343 +endif
344 +ifdef CONFIG_PAX_KERNEXEC_PLUGIN
345 +KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
346 +KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
347 +KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
348 +endif
349 +ifdef CONFIG_CHECKER_PLUGIN
350 +ifeq ($(call cc-ifversion, -ge, 0406, y), y)
351 +CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
352 +endif
353 +endif
354 +COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
355 +ifdef CONFIG_PAX_SIZE_OVERFLOW
356 +SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
357 +endif
358 +ifdef CONFIG_PAX_LATENT_ENTROPY
359 +LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
360 +endif
361 +ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
362 +STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
363 +endif
364 +GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
365 +GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
366 +GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
367 +GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
368 +export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
369 +ifeq ($(KBUILD_EXTMOD),)
370 +gcc-plugins:
371 + $(Q)$(MAKE) $(build)=tools/gcc
372 +else
373 +gcc-plugins: ;
374 +endif
375 +else
376 +gcc-plugins:
377 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
378 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
379 +else
380 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
381 +endif
382 + $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
383 +endif
384 +endif
385 +
386 include $(srctree)/arch/$(SRCARCH)/Makefile
387
388 ifdef CONFIG_READABLE_ASM
389 @@ -733,7 +793,7 @@ export mod_sign_cmd
390
391
392 ifeq ($(KBUILD_EXTMOD),)
393 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
394 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
395
396 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
397 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
398 @@ -782,6 +842,8 @@ endif
399
400 # The actual objects are generated when descending,
401 # make sure no implicit rule kicks in
402 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
403 +$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
404 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
405
406 # Handle descending into subdirectories listed in $(vmlinux-dirs)
407 @@ -791,7 +853,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
408 # Error messages still appears in the original language
409
410 PHONY += $(vmlinux-dirs)
411 -$(vmlinux-dirs): prepare scripts
412 +$(vmlinux-dirs): gcc-plugins prepare scripts
413 $(Q)$(MAKE) $(build)=$@
414
415 define filechk_kernel.release
416 @@ -838,6 +900,7 @@ prepare0: archprepare FORCE
417 $(Q)$(MAKE) $(build)=.
418
419 # All the preparing..
420 +prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
421 prepare: prepare0
422
423 # Generate some files
424 @@ -945,6 +1008,8 @@ all: modules
425 # using awk while concatenating to the final file.
426
427 PHONY += modules
428 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
429 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
430 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
431 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
432 @$(kecho) ' Building modules, stage 2.';
433 @@ -960,7 +1025,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
434
435 # Target to prepare building external modules
436 PHONY += modules_prepare
437 -modules_prepare: prepare scripts
438 +modules_prepare: gcc-plugins prepare scripts
439
440 # Target to install modules
441 PHONY += modules_install
442 @@ -1026,7 +1091,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
443 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
444 signing_key.priv signing_key.x509 x509.genkey \
445 extra_certificates signing_key.x509.keyid \
446 - signing_key.x509.signer
447 + signing_key.x509.signer tools/gcc/size_overflow_hash.h
448
449 # clean - Delete most, but leave enough to build external modules
450 #
451 @@ -1066,6 +1131,7 @@ distclean: mrproper
452 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
453 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
454 -o -name '.*.rej' \
455 + -o -name '.*.rej' -o -name '*.so' \
456 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
457 -type f -print | xargs rm -f
458
459 @@ -1227,6 +1293,8 @@ PHONY += $(module-dirs) modules
460 $(module-dirs): crmodverdir $(objtree)/Module.symvers
461 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
462
463 +modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
464 +modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
465 modules: $(module-dirs)
466 @$(kecho) ' Building modules, stage 2.';
467 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
468 @@ -1366,17 +1434,21 @@ else
469 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
470 endif
471
472 -%.s: %.c prepare scripts FORCE
473 +%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
474 +%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
475 +%.s: %.c gcc-plugins prepare scripts FORCE
476 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
477 %.i: %.c prepare scripts FORCE
478 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
479 -%.o: %.c prepare scripts FORCE
480 +%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
481 +%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
482 +%.o: %.c gcc-plugins prepare scripts FORCE
483 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
484 %.lst: %.c prepare scripts FORCE
485 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
486 -%.s: %.S prepare scripts FORCE
487 +%.s: %.S gcc-plugins prepare scripts FORCE
488 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
489 -%.o: %.S prepare scripts FORCE
490 +%.o: %.S gcc-plugins prepare scripts FORCE
491 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
492 %.symtypes: %.c prepare scripts FORCE
493 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
494 @@ -1386,11 +1458,15 @@ endif
495 $(cmd_crmodverdir)
496 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
497 $(build)=$(build-dir)
498 -%/: prepare scripts FORCE
499 +%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
500 +%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
501 +%/: gcc-plugins prepare scripts FORCE
502 $(cmd_crmodverdir)
503 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
504 $(build)=$(build-dir)
505 -%.ko: prepare scripts FORCE
506 +%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
507 +%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
508 +%.ko: gcc-plugins prepare scripts FORCE
509 $(cmd_crmodverdir)
510 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
511 $(build)=$(build-dir) $(@:.ko=.o)
512 diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
513 index 78b03ef..da28a51 100644
514 --- a/arch/alpha/include/asm/atomic.h
515 +++ b/arch/alpha/include/asm/atomic.h
516 @@ -292,6 +292,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
517 #define atomic_dec(v) atomic_sub(1,(v))
518 #define atomic64_dec(v) atomic64_sub(1,(v))
519
520 +#define atomic64_read_unchecked(v) atomic64_read(v)
521 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
522 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
523 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
524 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
525 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
526 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
527 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
528 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
529 +
530 #define smp_mb__before_atomic_dec() smp_mb()
531 #define smp_mb__after_atomic_dec() smp_mb()
532 #define smp_mb__before_atomic_inc() smp_mb()
533 diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
534 index ad368a9..fbe0f25 100644
535 --- a/arch/alpha/include/asm/cache.h
536 +++ b/arch/alpha/include/asm/cache.h
537 @@ -4,19 +4,19 @@
538 #ifndef __ARCH_ALPHA_CACHE_H
539 #define __ARCH_ALPHA_CACHE_H
540
541 +#include <linux/const.h>
542
543 /* Bytes per L1 (data) cache line. */
544 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
545 -# define L1_CACHE_BYTES 64
546 # define L1_CACHE_SHIFT 6
547 #else
548 /* Both EV4 and EV5 are write-through, read-allocate,
549 direct-mapped, physical.
550 */
551 -# define L1_CACHE_BYTES 32
552 # define L1_CACHE_SHIFT 5
553 #endif
554
555 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
556 #define SMP_CACHE_BYTES L1_CACHE_BYTES
557
558 #endif
559 diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
560 index 968d999..d36b2df 100644
561 --- a/arch/alpha/include/asm/elf.h
562 +++ b/arch/alpha/include/asm/elf.h
563 @@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
564
565 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
566
567 +#ifdef CONFIG_PAX_ASLR
568 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
569 +
570 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
571 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
572 +#endif
573 +
574 /* $0 is set by ld.so to a pointer to a function which might be
575 registered using atexit. This provides a mean for the dynamic
576 linker to call DT_FINI functions for shared libraries that have
577 diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
578 index bc2a0da..8ad11ee 100644
579 --- a/arch/alpha/include/asm/pgalloc.h
580 +++ b/arch/alpha/include/asm/pgalloc.h
581 @@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
582 pgd_set(pgd, pmd);
583 }
584
585 +static inline void
586 +pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
587 +{
588 + pgd_populate(mm, pgd, pmd);
589 +}
590 +
591 extern pgd_t *pgd_alloc(struct mm_struct *mm);
592
593 static inline void
594 diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
595 index d8f9b7e..f6222fa 100644
596 --- a/arch/alpha/include/asm/pgtable.h
597 +++ b/arch/alpha/include/asm/pgtable.h
598 @@ -102,6 +102,17 @@ struct vm_area_struct;
599 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
600 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
601 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
602 +
603 +#ifdef CONFIG_PAX_PAGEEXEC
604 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
605 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
606 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
607 +#else
608 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
609 +# define PAGE_COPY_NOEXEC PAGE_COPY
610 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
611 +#endif
612 +
613 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
614
615 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
616 diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
617 index 2fd00b7..cfd5069 100644
618 --- a/arch/alpha/kernel/module.c
619 +++ b/arch/alpha/kernel/module.c
620 @@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
621
622 /* The small sections were sorted to the end of the segment.
623 The following should definitely cover them. */
624 - gp = (u64)me->module_core + me->core_size - 0x8000;
625 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
626 got = sechdrs[me->arch.gotsecindex].sh_addr;
627
628 for (i = 0; i < n; i++) {
629 diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
630 index 1402fcc..0b1abd2 100644
631 --- a/arch/alpha/kernel/osf_sys.c
632 +++ b/arch/alpha/kernel/osf_sys.c
633 @@ -1298,10 +1298,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
634 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
635
636 static unsigned long
637 -arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
638 - unsigned long limit)
639 +arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
640 + unsigned long limit, unsigned long flags)
641 {
642 struct vm_unmapped_area_info info;
643 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
644
645 info.flags = 0;
646 info.length = len;
647 @@ -1309,6 +1310,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
648 info.high_limit = limit;
649 info.align_mask = 0;
650 info.align_offset = 0;
651 + info.threadstack_offset = offset;
652 return vm_unmapped_area(&info);
653 }
654
655 @@ -1341,20 +1343,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
656 merely specific addresses, but regions of memory -- perhaps
657 this feature should be incorporated into all ports? */
658
659 +#ifdef CONFIG_PAX_RANDMMAP
660 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
661 +#endif
662 +
663 if (addr) {
664 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
665 + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
666 if (addr != (unsigned long) -ENOMEM)
667 return addr;
668 }
669
670 /* Next, try allocating at TASK_UNMAPPED_BASE. */
671 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
672 - len, limit);
673 + addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
674 +
675 if (addr != (unsigned long) -ENOMEM)
676 return addr;
677
678 /* Finally, try allocating in low memory. */
679 - addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
680 + addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
681
682 return addr;
683 }
684 diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
685 index 98838a0..b304fb4 100644
686 --- a/arch/alpha/mm/fault.c
687 +++ b/arch/alpha/mm/fault.c
688 @@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
689 __reload_thread(pcb);
690 }
691
692 +#ifdef CONFIG_PAX_PAGEEXEC
693 +/*
694 + * PaX: decide what to do with offenders (regs->pc = fault address)
695 + *
696 + * returns 1 when task should be killed
697 + * 2 when patched PLT trampoline was detected
698 + * 3 when unpatched PLT trampoline was detected
699 + */
700 +static int pax_handle_fetch_fault(struct pt_regs *regs)
701 +{
702 +
703 +#ifdef CONFIG_PAX_EMUPLT
704 + int err;
705 +
706 + do { /* PaX: patched PLT emulation #1 */
707 + unsigned int ldah, ldq, jmp;
708 +
709 + err = get_user(ldah, (unsigned int *)regs->pc);
710 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
711 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
712 +
713 + if (err)
714 + break;
715 +
716 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
717 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
718 + jmp == 0x6BFB0000U)
719 + {
720 + unsigned long r27, addr;
721 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
722 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
723 +
724 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
725 + err = get_user(r27, (unsigned long *)addr);
726 + if (err)
727 + break;
728 +
729 + regs->r27 = r27;
730 + regs->pc = r27;
731 + return 2;
732 + }
733 + } while (0);
734 +
735 + do { /* PaX: patched PLT emulation #2 */
736 + unsigned int ldah, lda, br;
737 +
738 + err = get_user(ldah, (unsigned int *)regs->pc);
739 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
740 + err |= get_user(br, (unsigned int *)(regs->pc+8));
741 +
742 + if (err)
743 + break;
744 +
745 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
746 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
747 + (br & 0xFFE00000U) == 0xC3E00000U)
748 + {
749 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
750 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
751 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
752 +
753 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
754 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
755 + return 2;
756 + }
757 + } while (0);
758 +
759 + do { /* PaX: unpatched PLT emulation */
760 + unsigned int br;
761 +
762 + err = get_user(br, (unsigned int *)regs->pc);
763 +
764 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
765 + unsigned int br2, ldq, nop, jmp;
766 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
767 +
768 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
769 + err = get_user(br2, (unsigned int *)addr);
770 + err |= get_user(ldq, (unsigned int *)(addr+4));
771 + err |= get_user(nop, (unsigned int *)(addr+8));
772 + err |= get_user(jmp, (unsigned int *)(addr+12));
773 + err |= get_user(resolver, (unsigned long *)(addr+16));
774 +
775 + if (err)
776 + break;
777 +
778 + if (br2 == 0xC3600000U &&
779 + ldq == 0xA77B000CU &&
780 + nop == 0x47FF041FU &&
781 + jmp == 0x6B7B0000U)
782 + {
783 + regs->r28 = regs->pc+4;
784 + regs->r27 = addr+16;
785 + regs->pc = resolver;
786 + return 3;
787 + }
788 + }
789 + } while (0);
790 +#endif
791 +
792 + return 1;
793 +}
794 +
795 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
796 +{
797 + unsigned long i;
798 +
799 + printk(KERN_ERR "PAX: bytes at PC: ");
800 + for (i = 0; i < 5; i++) {
801 + unsigned int c;
802 + if (get_user(c, (unsigned int *)pc+i))
803 + printk(KERN_CONT "???????? ");
804 + else
805 + printk(KERN_CONT "%08x ", c);
806 + }
807 + printk("\n");
808 +}
809 +#endif
810
811 /*
812 * This routine handles page faults. It determines the address,
813 @@ -133,8 +251,29 @@ retry:
814 good_area:
815 si_code = SEGV_ACCERR;
816 if (cause < 0) {
817 - if (!(vma->vm_flags & VM_EXEC))
818 + if (!(vma->vm_flags & VM_EXEC)) {
819 +
820 +#ifdef CONFIG_PAX_PAGEEXEC
821 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
822 + goto bad_area;
823 +
824 + up_read(&mm->mmap_sem);
825 + switch (pax_handle_fetch_fault(regs)) {
826 +
827 +#ifdef CONFIG_PAX_EMUPLT
828 + case 2:
829 + case 3:
830 + return;
831 +#endif
832 +
833 + }
834 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
835 + do_group_exit(SIGKILL);
836 +#else
837 goto bad_area;
838 +#endif
839 +
840 + }
841 } else if (!cause) {
842 /* Allow reads even for write-only mappings */
843 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
844 diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
845 index 1ad6fb6..9406b3d 100644
846 --- a/arch/arm/Kconfig
847 +++ b/arch/arm/Kconfig
848 @@ -1832,7 +1832,7 @@ config ALIGNMENT_TRAP
849
850 config UACCESS_WITH_MEMCPY
851 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
852 - depends on MMU
853 + depends on MMU && !PAX_MEMORY_UDEREF
854 default y if CPU_FEROCEON
855 help
856 Implement faster copy_to_user and clear_user methods for CPU
857 @@ -2097,6 +2097,7 @@ config XIP_PHYS_ADDR
858 config KEXEC
859 bool "Kexec system call (EXPERIMENTAL)"
860 depends on (!SMP || PM_SLEEP_SMP)
861 + depends on !GRKERNSEC_KMEM
862 help
863 kexec is a system call that implements the ability to shutdown your
864 current kernel, and to start another kernel. It is like a reboot
865 diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
866 index da1c77d..2ee6056 100644
867 --- a/arch/arm/include/asm/atomic.h
868 +++ b/arch/arm/include/asm/atomic.h
869 @@ -17,17 +17,35 @@
870 #include <asm/barrier.h>
871 #include <asm/cmpxchg.h>
872
873 +#ifdef CONFIG_GENERIC_ATOMIC64
874 +#include <asm-generic/atomic64.h>
875 +#endif
876 +
877 #define ATOMIC_INIT(i) { (i) }
878
879 #ifdef __KERNEL__
880
881 +#define _ASM_EXTABLE(from, to) \
882 +" .pushsection __ex_table,\"a\"\n"\
883 +" .align 3\n" \
884 +" .long " #from ", " #to"\n" \
885 +" .popsection"
886 +
887 /*
888 * On ARM, ordinary assignment (str instruction) doesn't clear the local
889 * strex/ldrex monitor on some implementations. The reason we can use it for
890 * atomic_set() is the clrex or dummy strex done on every exception return.
891 */
892 #define atomic_read(v) (*(volatile int *)&(v)->counter)
893 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
894 +{
895 + return v->counter;
896 +}
897 #define atomic_set(v,i) (((v)->counter) = (i))
898 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
899 +{
900 + v->counter = i;
901 +}
902
903 #if __LINUX_ARM_ARCH__ >= 6
904
905 @@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
906 int result;
907
908 __asm__ __volatile__("@ atomic_add\n"
909 +"1: ldrex %1, [%3]\n"
910 +" adds %0, %1, %4\n"
911 +
912 +#ifdef CONFIG_PAX_REFCOUNT
913 +" bvc 3f\n"
914 +"2: bkpt 0xf103\n"
915 +"3:\n"
916 +#endif
917 +
918 +" strex %1, %0, [%3]\n"
919 +" teq %1, #0\n"
920 +" bne 1b"
921 +
922 +#ifdef CONFIG_PAX_REFCOUNT
923 +"\n4:\n"
924 + _ASM_EXTABLE(2b, 4b)
925 +#endif
926 +
927 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
928 + : "r" (&v->counter), "Ir" (i)
929 + : "cc");
930 +}
931 +
932 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
933 +{
934 + unsigned long tmp;
935 + int result;
936 +
937 + __asm__ __volatile__("@ atomic_add_unchecked\n"
938 "1: ldrex %0, [%3]\n"
939 " add %0, %0, %4\n"
940 " strex %1, %0, [%3]\n"
941 @@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
942 smp_mb();
943
944 __asm__ __volatile__("@ atomic_add_return\n"
945 +"1: ldrex %1, [%3]\n"
946 +" adds %0, %1, %4\n"
947 +
948 +#ifdef CONFIG_PAX_REFCOUNT
949 +" bvc 3f\n"
950 +" mov %0, %1\n"
951 +"2: bkpt 0xf103\n"
952 +"3:\n"
953 +#endif
954 +
955 +" strex %1, %0, [%3]\n"
956 +" teq %1, #0\n"
957 +" bne 1b"
958 +
959 +#ifdef CONFIG_PAX_REFCOUNT
960 +"\n4:\n"
961 + _ASM_EXTABLE(2b, 4b)
962 +#endif
963 +
964 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
965 + : "r" (&v->counter), "Ir" (i)
966 + : "cc");
967 +
968 + smp_mb();
969 +
970 + return result;
971 +}
972 +
973 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
974 +{
975 + unsigned long tmp;
976 + int result;
977 +
978 + smp_mb();
979 +
980 + __asm__ __volatile__("@ atomic_add_return_unchecked\n"
981 "1: ldrex %0, [%3]\n"
982 " add %0, %0, %4\n"
983 " strex %1, %0, [%3]\n"
984 @@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
985 int result;
986
987 __asm__ __volatile__("@ atomic_sub\n"
988 +"1: ldrex %1, [%3]\n"
989 +" subs %0, %1, %4\n"
990 +
991 +#ifdef CONFIG_PAX_REFCOUNT
992 +" bvc 3f\n"
993 +"2: bkpt 0xf103\n"
994 +"3:\n"
995 +#endif
996 +
997 +" strex %1, %0, [%3]\n"
998 +" teq %1, #0\n"
999 +" bne 1b"
1000 +
1001 +#ifdef CONFIG_PAX_REFCOUNT
1002 +"\n4:\n"
1003 + _ASM_EXTABLE(2b, 4b)
1004 +#endif
1005 +
1006 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1007 + : "r" (&v->counter), "Ir" (i)
1008 + : "cc");
1009 +}
1010 +
1011 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1012 +{
1013 + unsigned long tmp;
1014 + int result;
1015 +
1016 + __asm__ __volatile__("@ atomic_sub_unchecked\n"
1017 "1: ldrex %0, [%3]\n"
1018 " sub %0, %0, %4\n"
1019 " strex %1, %0, [%3]\n"
1020 @@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1021 smp_mb();
1022
1023 __asm__ __volatile__("@ atomic_sub_return\n"
1024 -"1: ldrex %0, [%3]\n"
1025 -" sub %0, %0, %4\n"
1026 +"1: ldrex %1, [%3]\n"
1027 +" subs %0, %1, %4\n"
1028 +
1029 +#ifdef CONFIG_PAX_REFCOUNT
1030 +" bvc 3f\n"
1031 +" mov %0, %1\n"
1032 +"2: bkpt 0xf103\n"
1033 +"3:\n"
1034 +#endif
1035 +
1036 " strex %1, %0, [%3]\n"
1037 " teq %1, #0\n"
1038 " bne 1b"
1039 +
1040 +#ifdef CONFIG_PAX_REFCOUNT
1041 +"\n4:\n"
1042 + _ASM_EXTABLE(2b, 4b)
1043 +#endif
1044 +
1045 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1046 : "r" (&v->counter), "Ir" (i)
1047 : "cc");
1048 @@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1049 return oldval;
1050 }
1051
1052 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1053 +{
1054 + unsigned long oldval, res;
1055 +
1056 + smp_mb();
1057 +
1058 + do {
1059 + __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1060 + "ldrex %1, [%3]\n"
1061 + "mov %0, #0\n"
1062 + "teq %1, %4\n"
1063 + "strexeq %0, %5, [%3]\n"
1064 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1065 + : "r" (&ptr->counter), "Ir" (old), "r" (new)
1066 + : "cc");
1067 + } while (res);
1068 +
1069 + smp_mb();
1070 +
1071 + return oldval;
1072 +}
1073 +
1074 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1075 {
1076 unsigned long tmp, tmp2;
1077 @@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1078
1079 return val;
1080 }
1081 +
1082 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1083 +{
1084 + return atomic_add_return(i, v);
1085 +}
1086 +
1087 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1088 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1089 +{
1090 + (void) atomic_add_return(i, v);
1091 +}
1092
1093 static inline int atomic_sub_return(int i, atomic_t *v)
1094 {
1095 @@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1096 return val;
1097 }
1098 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1099 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1100 +{
1101 + (void) atomic_sub_return(i, v);
1102 +}
1103
1104 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1105 {
1106 @@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1107 return ret;
1108 }
1109
1110 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1111 +{
1112 + return atomic_cmpxchg(v, old, new);
1113 +}
1114 +
1115 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1116 {
1117 unsigned long flags;
1118 @@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1119 #endif /* __LINUX_ARM_ARCH__ */
1120
1121 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1122 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1123 +{
1124 + return xchg(&v->counter, new);
1125 +}
1126
1127 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1128 {
1129 @@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1130 }
1131
1132 #define atomic_inc(v) atomic_add(1, v)
1133 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1134 +{
1135 + atomic_add_unchecked(1, v);
1136 +}
1137 #define atomic_dec(v) atomic_sub(1, v)
1138 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1139 +{
1140 + atomic_sub_unchecked(1, v);
1141 +}
1142
1143 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1144 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1145 +{
1146 + return atomic_add_return_unchecked(1, v) == 0;
1147 +}
1148 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1149 #define atomic_inc_return(v) (atomic_add_return(1, v))
1150 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1151 +{
1152 + return atomic_add_return_unchecked(1, v);
1153 +}
1154 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1155 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1156
1157 @@ -241,6 +428,14 @@ typedef struct {
1158 u64 __aligned(8) counter;
1159 } atomic64_t;
1160
1161 +#ifdef CONFIG_PAX_REFCOUNT
1162 +typedef struct {
1163 + u64 __aligned(8) counter;
1164 +} atomic64_unchecked_t;
1165 +#else
1166 +typedef atomic64_t atomic64_unchecked_t;
1167 +#endif
1168 +
1169 #define ATOMIC64_INIT(i) { (i) }
1170
1171 #ifdef CONFIG_ARM_LPAE
1172 @@ -257,6 +452,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1173 return result;
1174 }
1175
1176 +static inline u64 atomic64_read_unchecked(const atomic64_unchecked_t *v)
1177 +{
1178 + u64 result;
1179 +
1180 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1181 +" ldrd %0, %H0, [%1]"
1182 + : "=&r" (result)
1183 + : "r" (&v->counter), "Qo" (v->counter)
1184 + );
1185 +
1186 + return result;
1187 +}
1188 +
1189 static inline void atomic64_set(atomic64_t *v, u64 i)
1190 {
1191 __asm__ __volatile__("@ atomic64_set\n"
1192 @@ -265,6 +473,15 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1193 : "r" (&v->counter), "r" (i)
1194 );
1195 }
1196 +
1197 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1198 +{
1199 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1200 +" strd %2, %H2, [%1]"
1201 + : "=Qo" (v->counter)
1202 + : "r" (&v->counter), "r" (i)
1203 + );
1204 +}
1205 #else
1206 static inline u64 atomic64_read(const atomic64_t *v)
1207 {
1208 @@ -279,6 +496,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1209 return result;
1210 }
1211
1212 +static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1213 +{
1214 + u64 result;
1215 +
1216 + __asm__ __volatile__("@ atomic64_read_unchecked\n"
1217 +" ldrexd %0, %H0, [%1]"
1218 + : "=&r" (result)
1219 + : "r" (&v->counter), "Qo" (v->counter)
1220 + );
1221 +
1222 + return result;
1223 +}
1224 +
1225 static inline void atomic64_set(atomic64_t *v, u64 i)
1226 {
1227 u64 tmp;
1228 @@ -292,6 +522,21 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1229 : "r" (&v->counter), "r" (i)
1230 : "cc");
1231 }
1232 +
1233 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1234 +{
1235 + u64 tmp;
1236 +
1237 + __asm__ __volatile__("@ atomic64_set_unchecked\n"
1238 +"1: ldrexd %0, %H0, [%2]\n"
1239 +" strexd %0, %3, %H3, [%2]\n"
1240 +" teq %0, #0\n"
1241 +" bne 1b"
1242 + : "=&r" (tmp), "=Qo" (v->counter)
1243 + : "r" (&v->counter), "r" (i)
1244 + : "cc");
1245 +}
1246 +
1247 #endif
1248
1249 static inline void atomic64_add(u64 i, atomic64_t *v)
1250 @@ -302,6 +547,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1251 __asm__ __volatile__("@ atomic64_add\n"
1252 "1: ldrexd %0, %H0, [%3]\n"
1253 " adds %0, %0, %4\n"
1254 +" adcs %H0, %H0, %H4\n"
1255 +
1256 +#ifdef CONFIG_PAX_REFCOUNT
1257 +" bvc 3f\n"
1258 +"2: bkpt 0xf103\n"
1259 +"3:\n"
1260 +#endif
1261 +
1262 +" strexd %1, %0, %H0, [%3]\n"
1263 +" teq %1, #0\n"
1264 +" bne 1b"
1265 +
1266 +#ifdef CONFIG_PAX_REFCOUNT
1267 +"\n4:\n"
1268 + _ASM_EXTABLE(2b, 4b)
1269 +#endif
1270 +
1271 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1272 + : "r" (&v->counter), "r" (i)
1273 + : "cc");
1274 +}
1275 +
1276 +static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1277 +{
1278 + u64 result;
1279 + unsigned long tmp;
1280 +
1281 + __asm__ __volatile__("@ atomic64_add_unchecked\n"
1282 +"1: ldrexd %0, %H0, [%3]\n"
1283 +" adds %0, %0, %4\n"
1284 " adc %H0, %H0, %H4\n"
1285 " strexd %1, %0, %H0, [%3]\n"
1286 " teq %1, #0\n"
1287 @@ -313,12 +588,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1288
1289 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1290 {
1291 - u64 result;
1292 - unsigned long tmp;
1293 + u64 result, tmp;
1294
1295 smp_mb();
1296
1297 __asm__ __volatile__("@ atomic64_add_return\n"
1298 +"1: ldrexd %1, %H1, [%3]\n"
1299 +" adds %0, %1, %4\n"
1300 +" adcs %H0, %H1, %H4\n"
1301 +
1302 +#ifdef CONFIG_PAX_REFCOUNT
1303 +" bvc 3f\n"
1304 +" mov %0, %1\n"
1305 +" mov %H0, %H1\n"
1306 +"2: bkpt 0xf103\n"
1307 +"3:\n"
1308 +#endif
1309 +
1310 +" strexd %1, %0, %H0, [%3]\n"
1311 +" teq %1, #0\n"
1312 +" bne 1b"
1313 +
1314 +#ifdef CONFIG_PAX_REFCOUNT
1315 +"\n4:\n"
1316 + _ASM_EXTABLE(2b, 4b)
1317 +#endif
1318 +
1319 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1320 + : "r" (&v->counter), "r" (i)
1321 + : "cc");
1322 +
1323 + smp_mb();
1324 +
1325 + return result;
1326 +}
1327 +
1328 +static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1329 +{
1330 + u64 result;
1331 + unsigned long tmp;
1332 +
1333 + smp_mb();
1334 +
1335 + __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1336 "1: ldrexd %0, %H0, [%3]\n"
1337 " adds %0, %0, %4\n"
1338 " adc %H0, %H0, %H4\n"
1339 @@ -342,6 +654,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1340 __asm__ __volatile__("@ atomic64_sub\n"
1341 "1: ldrexd %0, %H0, [%3]\n"
1342 " subs %0, %0, %4\n"
1343 +" sbcs %H0, %H0, %H4\n"
1344 +
1345 +#ifdef CONFIG_PAX_REFCOUNT
1346 +" bvc 3f\n"
1347 +"2: bkpt 0xf103\n"
1348 +"3:\n"
1349 +#endif
1350 +
1351 +" strexd %1, %0, %H0, [%3]\n"
1352 +" teq %1, #0\n"
1353 +" bne 1b"
1354 +
1355 +#ifdef CONFIG_PAX_REFCOUNT
1356 +"\n4:\n"
1357 + _ASM_EXTABLE(2b, 4b)
1358 +#endif
1359 +
1360 + : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1361 + : "r" (&v->counter), "r" (i)
1362 + : "cc");
1363 +}
1364 +
1365 +static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1366 +{
1367 + u64 result;
1368 + unsigned long tmp;
1369 +
1370 + __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1371 +"1: ldrexd %0, %H0, [%3]\n"
1372 +" subs %0, %0, %4\n"
1373 " sbc %H0, %H0, %H4\n"
1374 " strexd %1, %0, %H0, [%3]\n"
1375 " teq %1, #0\n"
1376 @@ -353,18 +695,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1377
1378 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1379 {
1380 - u64 result;
1381 - unsigned long tmp;
1382 + u64 result, tmp;
1383
1384 smp_mb();
1385
1386 __asm__ __volatile__("@ atomic64_sub_return\n"
1387 -"1: ldrexd %0, %H0, [%3]\n"
1388 -" subs %0, %0, %4\n"
1389 -" sbc %H0, %H0, %H4\n"
1390 +"1: ldrexd %1, %H1, [%3]\n"
1391 +" subs %0, %1, %4\n"
1392 +" sbcs %H0, %H1, %H4\n"
1393 +
1394 +#ifdef CONFIG_PAX_REFCOUNT
1395 +" bvc 3f\n"
1396 +" mov %0, %1\n"
1397 +" mov %H0, %H1\n"
1398 +"2: bkpt 0xf103\n"
1399 +"3:\n"
1400 +#endif
1401 +
1402 " strexd %1, %0, %H0, [%3]\n"
1403 " teq %1, #0\n"
1404 " bne 1b"
1405 +
1406 +#ifdef CONFIG_PAX_REFCOUNT
1407 +"\n4:\n"
1408 + _ASM_EXTABLE(2b, 4b)
1409 +#endif
1410 +
1411 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1412 : "r" (&v->counter), "r" (i)
1413 : "cc");
1414 @@ -398,6 +754,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1415 return oldval;
1416 }
1417
1418 +static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1419 +{
1420 + u64 oldval;
1421 + unsigned long res;
1422 +
1423 + smp_mb();
1424 +
1425 + do {
1426 + __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1427 + "ldrexd %1, %H1, [%3]\n"
1428 + "mov %0, #0\n"
1429 + "teq %1, %4\n"
1430 + "teqeq %H1, %H4\n"
1431 + "strexdeq %0, %5, %H5, [%3]"
1432 + : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1433 + : "r" (&ptr->counter), "r" (old), "r" (new)
1434 + : "cc");
1435 + } while (res);
1436 +
1437 + smp_mb();
1438 +
1439 + return oldval;
1440 +}
1441 +
1442 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1443 {
1444 u64 result;
1445 @@ -421,21 +801,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1446
1447 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1448 {
1449 - u64 result;
1450 - unsigned long tmp;
1451 + u64 result, tmp;
1452
1453 smp_mb();
1454
1455 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1456 -"1: ldrexd %0, %H0, [%3]\n"
1457 -" subs %0, %0, #1\n"
1458 -" sbc %H0, %H0, #0\n"
1459 +"1: ldrexd %1, %H1, [%3]\n"
1460 +" subs %0, %1, #1\n"
1461 +" sbcs %H0, %H1, #0\n"
1462 +
1463 +#ifdef CONFIG_PAX_REFCOUNT
1464 +" bvc 3f\n"
1465 +" mov %0, %1\n"
1466 +" mov %H0, %H1\n"
1467 +"2: bkpt 0xf103\n"
1468 +"3:\n"
1469 +#endif
1470 +
1471 " teq %H0, #0\n"
1472 -" bmi 2f\n"
1473 +" bmi 4f\n"
1474 " strexd %1, %0, %H0, [%3]\n"
1475 " teq %1, #0\n"
1476 " bne 1b\n"
1477 -"2:"
1478 +"4:\n"
1479 +
1480 +#ifdef CONFIG_PAX_REFCOUNT
1481 + _ASM_EXTABLE(2b, 4b)
1482 +#endif
1483 +
1484 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1485 : "r" (&v->counter)
1486 : "cc");
1487 @@ -458,13 +851,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1488 " teq %0, %5\n"
1489 " teqeq %H0, %H5\n"
1490 " moveq %1, #0\n"
1491 -" beq 2f\n"
1492 +" beq 4f\n"
1493 " adds %0, %0, %6\n"
1494 -" adc %H0, %H0, %H6\n"
1495 +" adcs %H0, %H0, %H6\n"
1496 +
1497 +#ifdef CONFIG_PAX_REFCOUNT
1498 +" bvc 3f\n"
1499 +"2: bkpt 0xf103\n"
1500 +"3:\n"
1501 +#endif
1502 +
1503 " strexd %2, %0, %H0, [%4]\n"
1504 " teq %2, #0\n"
1505 " bne 1b\n"
1506 -"2:"
1507 +"4:\n"
1508 +
1509 +#ifdef CONFIG_PAX_REFCOUNT
1510 + _ASM_EXTABLE(2b, 4b)
1511 +#endif
1512 +
1513 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1514 : "r" (&v->counter), "r" (u), "r" (a)
1515 : "cc");
1516 @@ -477,10 +882,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1517
1518 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1519 #define atomic64_inc(v) atomic64_add(1LL, (v))
1520 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1521 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1522 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1523 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1524 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1525 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1526 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1527 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1528 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1529 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1530 diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1531 index 75fe66b..ba3dee4 100644
1532 --- a/arch/arm/include/asm/cache.h
1533 +++ b/arch/arm/include/asm/cache.h
1534 @@ -4,8 +4,10 @@
1535 #ifndef __ASMARM_CACHE_H
1536 #define __ASMARM_CACHE_H
1537
1538 +#include <linux/const.h>
1539 +
1540 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1541 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1542 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1543
1544 /*
1545 * Memory returned by kmalloc() may be used for DMA, so we must make
1546 @@ -24,5 +26,6 @@
1547 #endif
1548
1549 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1550 +#define __read_only __attribute__ ((__section__(".data..read_only")))
1551
1552 #endif
1553 diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1554 index 15f2d5b..43ffa53 100644
1555 --- a/arch/arm/include/asm/cacheflush.h
1556 +++ b/arch/arm/include/asm/cacheflush.h
1557 @@ -116,7 +116,7 @@ struct cpu_cache_fns {
1558 void (*dma_unmap_area)(const void *, size_t, int);
1559
1560 void (*dma_flush_range)(const void *, const void *);
1561 -};
1562 +} __no_const;
1563
1564 /*
1565 * Select the calling method
1566 diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1567 index 6dcc164..b14d917 100644
1568 --- a/arch/arm/include/asm/checksum.h
1569 +++ b/arch/arm/include/asm/checksum.h
1570 @@ -37,7 +37,19 @@ __wsum
1571 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1572
1573 __wsum
1574 -csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1575 +__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1576 +
1577 +static inline __wsum
1578 +csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1579 +{
1580 + __wsum ret;
1581 + pax_open_userland();
1582 + ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1583 + pax_close_userland();
1584 + return ret;
1585 +}
1586 +
1587 +
1588
1589 /*
1590 * Fold a partial checksum without adding pseudo headers
1591 diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1592 index 4f009c1..466c59b 100644
1593 --- a/arch/arm/include/asm/cmpxchg.h
1594 +++ b/arch/arm/include/asm/cmpxchg.h
1595 @@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1596
1597 #define xchg(ptr,x) \
1598 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1599 +#define xchg_unchecked(ptr,x) \
1600 + ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1601
1602 #include <asm-generic/cmpxchg-local.h>
1603
1604 diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1605 index 6ddbe44..b5e38b1 100644
1606 --- a/arch/arm/include/asm/domain.h
1607 +++ b/arch/arm/include/asm/domain.h
1608 @@ -48,18 +48,37 @@
1609 * Domain types
1610 */
1611 #define DOMAIN_NOACCESS 0
1612 -#define DOMAIN_CLIENT 1
1613 #ifdef CONFIG_CPU_USE_DOMAINS
1614 +#define DOMAIN_USERCLIENT 1
1615 +#define DOMAIN_KERNELCLIENT 1
1616 #define DOMAIN_MANAGER 3
1617 +#define DOMAIN_VECTORS DOMAIN_USER
1618 #else
1619 +
1620 +#ifdef CONFIG_PAX_KERNEXEC
1621 #define DOMAIN_MANAGER 1
1622 +#define DOMAIN_KERNEXEC 3
1623 +#else
1624 +#define DOMAIN_MANAGER 1
1625 +#endif
1626 +
1627 +#ifdef CONFIG_PAX_MEMORY_UDEREF
1628 +#define DOMAIN_USERCLIENT 0
1629 +#define DOMAIN_UDEREF 1
1630 +#define DOMAIN_VECTORS DOMAIN_KERNEL
1631 +#else
1632 +#define DOMAIN_USERCLIENT 1
1633 +#define DOMAIN_VECTORS DOMAIN_USER
1634 +#endif
1635 +#define DOMAIN_KERNELCLIENT 1
1636 +
1637 #endif
1638
1639 #define domain_val(dom,type) ((type) << (2*(dom)))
1640
1641 #ifndef __ASSEMBLY__
1642
1643 -#ifdef CONFIG_CPU_USE_DOMAINS
1644 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1645 static inline void set_domain(unsigned val)
1646 {
1647 asm volatile(
1648 @@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1649 isb();
1650 }
1651
1652 -#define modify_domain(dom,type) \
1653 - do { \
1654 - struct thread_info *thread = current_thread_info(); \
1655 - unsigned int domain = thread->cpu_domain; \
1656 - domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1657 - thread->cpu_domain = domain | domain_val(dom, type); \
1658 - set_domain(thread->cpu_domain); \
1659 - } while (0)
1660 -
1661 +extern void modify_domain(unsigned int dom, unsigned int type);
1662 #else
1663 static inline void set_domain(unsigned val) { }
1664 static inline void modify_domain(unsigned dom, unsigned type) { }
1665 diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1666 index f4b46d3..abc9b2b 100644
1667 --- a/arch/arm/include/asm/elf.h
1668 +++ b/arch/arm/include/asm/elf.h
1669 @@ -114,7 +114,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1670 the loader. We need to make sure that it is out of the way of the program
1671 that it will "exec", and that there is sufficient room for the brk. */
1672
1673 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1674 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1675 +
1676 +#ifdef CONFIG_PAX_ASLR
1677 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1678 +
1679 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1680 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1681 +#endif
1682
1683 /* When the program starts, a1 contains a pointer to a function to be
1684 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1685 @@ -124,10 +131,6 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1686 extern void elf_set_personality(const struct elf32_hdr *);
1687 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1688
1689 -struct mm_struct;
1690 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1691 -#define arch_randomize_brk arch_randomize_brk
1692 -
1693 #ifdef CONFIG_MMU
1694 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1695 struct linux_binprm;
1696 diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1697 index de53547..52b9a28 100644
1698 --- a/arch/arm/include/asm/fncpy.h
1699 +++ b/arch/arm/include/asm/fncpy.h
1700 @@ -81,7 +81,9 @@
1701 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1702 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1703 \
1704 + pax_open_kernel(); \
1705 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1706 + pax_close_kernel(); \
1707 flush_icache_range((unsigned long)(dest_buf), \
1708 (unsigned long)(dest_buf) + (size)); \
1709 \
1710 diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1711 index e42cf59..7b94b8f 100644
1712 --- a/arch/arm/include/asm/futex.h
1713 +++ b/arch/arm/include/asm/futex.h
1714 @@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1715 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1716 return -EFAULT;
1717
1718 + pax_open_userland();
1719 +
1720 smp_mb();
1721 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1722 "1: ldrex %1, [%4]\n"
1723 @@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1724 : "cc", "memory");
1725 smp_mb();
1726
1727 + pax_close_userland();
1728 +
1729 *uval = val;
1730 return ret;
1731 }
1732 @@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1733 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1734 return -EFAULT;
1735
1736 + pax_open_userland();
1737 +
1738 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1739 "1: " TUSER(ldr) " %1, [%4]\n"
1740 " teq %1, %2\n"
1741 @@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1742 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1743 : "cc", "memory");
1744
1745 + pax_close_userland();
1746 +
1747 *uval = val;
1748 return ret;
1749 }
1750 @@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1751 return -EFAULT;
1752
1753 pagefault_disable(); /* implies preempt_disable() */
1754 + pax_open_userland();
1755
1756 switch (op) {
1757 case FUTEX_OP_SET:
1758 @@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1759 ret = -ENOSYS;
1760 }
1761
1762 + pax_close_userland();
1763 pagefault_enable(); /* subsumes preempt_enable() */
1764
1765 if (!ret) {
1766 diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1767 index 83eb2f7..ed77159 100644
1768 --- a/arch/arm/include/asm/kmap_types.h
1769 +++ b/arch/arm/include/asm/kmap_types.h
1770 @@ -4,6 +4,6 @@
1771 /*
1772 * This is the "bare minimum". AIO seems to require this.
1773 */
1774 -#define KM_TYPE_NR 16
1775 +#define KM_TYPE_NR 17
1776
1777 #endif
1778 diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1779 index 9e614a1..3302cca 100644
1780 --- a/arch/arm/include/asm/mach/dma.h
1781 +++ b/arch/arm/include/asm/mach/dma.h
1782 @@ -22,7 +22,7 @@ struct dma_ops {
1783 int (*residue)(unsigned int, dma_t *); /* optional */
1784 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1785 const char *type;
1786 -};
1787 +} __do_const;
1788
1789 struct dma_struct {
1790 void *addr; /* single DMA address */
1791 diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1792 index 2fe141f..192dc01 100644
1793 --- a/arch/arm/include/asm/mach/map.h
1794 +++ b/arch/arm/include/asm/mach/map.h
1795 @@ -27,13 +27,16 @@ struct map_desc {
1796 #define MT_MINICLEAN 6
1797 #define MT_LOW_VECTORS 7
1798 #define MT_HIGH_VECTORS 8
1799 -#define MT_MEMORY 9
1800 +#define MT_MEMORY_RWX 9
1801 #define MT_ROM 10
1802 -#define MT_MEMORY_NONCACHED 11
1803 +#define MT_MEMORY_NONCACHED_RX 11
1804 #define MT_MEMORY_DTCM 12
1805 #define MT_MEMORY_ITCM 13
1806 #define MT_MEMORY_SO 14
1807 #define MT_MEMORY_DMA_READY 15
1808 +#define MT_MEMORY_RW 16
1809 +#define MT_MEMORY_RX 17
1810 +#define MT_MEMORY_NONCACHED_RW 18
1811
1812 #ifdef CONFIG_MMU
1813 extern void iotable_init(struct map_desc *, int);
1814 diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1815 index f94784f..9a09a4a 100644
1816 --- a/arch/arm/include/asm/outercache.h
1817 +++ b/arch/arm/include/asm/outercache.h
1818 @@ -35,7 +35,7 @@ struct outer_cache_fns {
1819 #endif
1820 void (*set_debug)(unsigned long);
1821 void (*resume)(void);
1822 -};
1823 +} __no_const;
1824
1825 extern struct outer_cache_fns outer_cache;
1826
1827 diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1828 index 4355f0e..c229913 100644
1829 --- a/arch/arm/include/asm/page.h
1830 +++ b/arch/arm/include/asm/page.h
1831 @@ -114,7 +114,7 @@ struct cpu_user_fns {
1832 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1833 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1834 unsigned long vaddr, struct vm_area_struct *vma);
1835 -};
1836 +} __no_const;
1837
1838 #ifdef MULTI_USER
1839 extern struct cpu_user_fns cpu_user;
1840 diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1841 index 943504f..c37a730 100644
1842 --- a/arch/arm/include/asm/pgalloc.h
1843 +++ b/arch/arm/include/asm/pgalloc.h
1844 @@ -17,6 +17,7 @@
1845 #include <asm/processor.h>
1846 #include <asm/cacheflush.h>
1847 #include <asm/tlbflush.h>
1848 +#include <asm/system_info.h>
1849
1850 #define check_pgt_cache() do { } while (0)
1851
1852 @@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1853 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1854 }
1855
1856 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1857 +{
1858 + pud_populate(mm, pud, pmd);
1859 +}
1860 +
1861 #else /* !CONFIG_ARM_LPAE */
1862
1863 /*
1864 @@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1865 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1866 #define pmd_free(mm, pmd) do { } while (0)
1867 #define pud_populate(mm,pmd,pte) BUG()
1868 +#define pud_populate_kernel(mm,pmd,pte) BUG()
1869
1870 #endif /* CONFIG_ARM_LPAE */
1871
1872 @@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1873 __free_page(pte);
1874 }
1875
1876 +static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1877 +{
1878 +#ifdef CONFIG_ARM_LPAE
1879 + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1880 +#else
1881 + if (addr & SECTION_SIZE)
1882 + pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1883 + else
1884 + pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1885 +#endif
1886 + flush_pmd_entry(pmdp);
1887 +}
1888 +
1889 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1890 pmdval_t prot)
1891 {
1892 @@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1893 static inline void
1894 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1895 {
1896 - __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1897 + __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1898 }
1899 #define pmd_pgtable(pmd) pmd_page(pmd)
1900
1901 diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1902 index 5cfba15..f415e1a 100644
1903 --- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1904 +++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1905 @@ -20,12 +20,15 @@
1906 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1907 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1908 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1909 +#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1910 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1911 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1912 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1913 +
1914 /*
1915 * - section
1916 */
1917 +#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1918 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1919 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1920 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1921 @@ -37,6 +40,7 @@
1922 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1923 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1924 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1925 +#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1926
1927 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1928 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1929 @@ -66,6 +70,7 @@
1930 * - extended small page/tiny page
1931 */
1932 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1933 +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1934 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1935 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1936 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1937 diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1938 index f97ee02..cc9fe9e 100644
1939 --- a/arch/arm/include/asm/pgtable-2level.h
1940 +++ b/arch/arm/include/asm/pgtable-2level.h
1941 @@ -126,6 +126,9 @@
1942 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1943 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1944
1945 +/* Two-level page tables only have PXN in the PGD, not in the PTE. */
1946 +#define L_PTE_PXN (_AT(pteval_t, 0))
1947 +
1948 /*
1949 * These are the memory types, defined to be compatible with
1950 * pre-ARMv6 CPUs cacheable and bufferable bits: XXCB
1951 diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1952 index 626989f..9d67a33 100644
1953 --- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1954 +++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1955 @@ -75,6 +75,7 @@
1956 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1957 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1958 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1959 +#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1960 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1961
1962 /*
1963 diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1964 index 5689c18..eea12f9 100644
1965 --- a/arch/arm/include/asm/pgtable-3level.h
1966 +++ b/arch/arm/include/asm/pgtable-3level.h
1967 @@ -82,6 +82,7 @@
1968 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1969 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1970 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1971 +#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1972 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1973 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1974 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1975 @@ -95,6 +96,7 @@
1976 /*
1977 * To be used in assembly code with the upper page attributes.
1978 */
1979 +#define L_PTE_PXN_HIGH (1 << (53 - 32))
1980 #define L_PTE_XN_HIGH (1 << (54 - 32))
1981 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1982
1983 diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1984 index 1571d12..b8a9b43 100644
1985 --- a/arch/arm/include/asm/pgtable.h
1986 +++ b/arch/arm/include/asm/pgtable.h
1987 @@ -33,6 +33,9 @@
1988 #include <asm/pgtable-2level.h>
1989 #endif
1990
1991 +#define ktla_ktva(addr) (addr)
1992 +#define ktva_ktla(addr) (addr)
1993 +
1994 /*
1995 * Just any arbitrary offset to the start of the vmalloc VM area: the
1996 * current 8MB value just means that there will be a 8MB "hole" after the
1997 @@ -48,6 +51,9 @@
1998 #define LIBRARY_TEXT_START 0x0c000000
1999
2000 #ifndef __ASSEMBLY__
2001 +extern pteval_t __supported_pte_mask;
2002 +extern pmdval_t __supported_pmd_mask;
2003 +
2004 extern void __pte_error(const char *file, int line, pte_t);
2005 extern void __pmd_error(const char *file, int line, pmd_t);
2006 extern void __pgd_error(const char *file, int line, pgd_t);
2007 @@ -56,6 +62,48 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2008 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2009 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2010
2011 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
2012 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2013 +
2014 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2015 +#include <asm/domain.h>
2016 +#include <linux/thread_info.h>
2017 +#include <linux/preempt.h>
2018 +
2019 +static inline int test_domain(int domain, int domaintype)
2020 +{
2021 + return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2022 +}
2023 +#endif
2024 +
2025 +#ifdef CONFIG_PAX_KERNEXEC
2026 +static inline unsigned long pax_open_kernel(void) {
2027 +#ifdef CONFIG_ARM_LPAE
2028 + /* TODO */
2029 +#else
2030 + preempt_disable();
2031 + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2032 + modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2033 +#endif
2034 + return 0;
2035 +}
2036 +
2037 +static inline unsigned long pax_close_kernel(void) {
2038 +#ifdef CONFIG_ARM_LPAE
2039 + /* TODO */
2040 +#else
2041 + BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2042 + /* DOMAIN_MANAGER = "client" under KERNEXEC */
2043 + modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2044 + preempt_enable_no_resched();
2045 +#endif
2046 + return 0;
2047 +}
2048 +#else
2049 +static inline unsigned long pax_open_kernel(void) { return 0; }
2050 +static inline unsigned long pax_close_kernel(void) { return 0; }
2051 +#endif
2052 +
2053 /*
2054 * This is the lowest virtual address we can permit any user space
2055 * mapping to be mapped at. This is particularly important for
2056 @@ -75,8 +123,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2057 /*
2058 * The pgprot_* and protection_map entries will be fixed up in runtime
2059 * to include the cachable and bufferable bits based on memory policy,
2060 - * as well as any architecture dependent bits like global/ASID and SMP
2061 - * shared mapping bits.
2062 + * as well as any architecture dependent bits like global/ASID, PXN,
2063 + * and SMP shared mapping bits.
2064 */
2065 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2066
2067 @@ -260,7 +308,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2068 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2069 {
2070 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2071 - L_PTE_NONE | L_PTE_VALID;
2072 + L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2073 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2074 return pte;
2075 }
2076 diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2077 index 5324c11..bcae5f0 100644
2078 --- a/arch/arm/include/asm/proc-fns.h
2079 +++ b/arch/arm/include/asm/proc-fns.h
2080 @@ -75,7 +75,7 @@ extern struct processor {
2081 unsigned int suspend_size;
2082 void (*do_suspend)(void *);
2083 void (*do_resume)(void *);
2084 -} processor;
2085 +} __do_const processor;
2086
2087 #ifndef MULTI_CPU
2088 extern void cpu_proc_init(void);
2089 diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2090 index c4ae171..ea0c0c2 100644
2091 --- a/arch/arm/include/asm/psci.h
2092 +++ b/arch/arm/include/asm/psci.h
2093 @@ -29,7 +29,7 @@ struct psci_operations {
2094 int (*cpu_off)(struct psci_power_state state);
2095 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
2096 int (*migrate)(unsigned long cpuid);
2097 -};
2098 +} __no_const;
2099
2100 extern struct psci_operations psci_ops;
2101 extern struct smp_operations psci_smp_ops;
2102 diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2103 index a8cae71c..65dd797 100644
2104 --- a/arch/arm/include/asm/smp.h
2105 +++ b/arch/arm/include/asm/smp.h
2106 @@ -110,7 +110,7 @@ struct smp_operations {
2107 int (*cpu_disable)(unsigned int cpu);
2108 #endif
2109 #endif
2110 -};
2111 +} __no_const;
2112
2113 /*
2114 * set platform specific SMP operations
2115 diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2116 index df5e13d..97efb82 100644
2117 --- a/arch/arm/include/asm/thread_info.h
2118 +++ b/arch/arm/include/asm/thread_info.h
2119 @@ -88,9 +88,9 @@ struct thread_info {
2120 .flags = 0, \
2121 .preempt_count = INIT_PREEMPT_COUNT, \
2122 .addr_limit = KERNEL_DS, \
2123 - .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2124 - domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2125 - domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2126 + .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2127 + domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2128 + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2129 .restart_block = { \
2130 .fn = do_no_restart_syscall, \
2131 }, \
2132 @@ -163,7 +163,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2133 #define TIF_SYSCALL_AUDIT 9
2134 #define TIF_SYSCALL_TRACEPOINT 10
2135 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2136 -#define TIF_NOHZ 12 /* in adaptive nohz mode */
2137 +/* within 8 bits of TIF_SYSCALL_TRACE
2138 + * to meet flexible second operand requirements
2139 + */
2140 +#define TIF_GRSEC_SETXID 12
2141 +#define TIF_NOHZ 13 /* in adaptive nohz mode */
2142 #define TIF_USING_IWMMXT 17
2143 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2144 #define TIF_RESTORE_SIGMASK 20
2145 @@ -176,10 +180,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2146 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2147 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2148 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2149 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2150
2151 /* Checks for any syscall work in entry-common.S */
2152 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2153 - _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2154 + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2155
2156 /*
2157 * Change these and you break ASM code in entry-common.S
2158 diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2159 index 72abdc5..9eba222 100644
2160 --- a/arch/arm/include/asm/uaccess.h
2161 +++ b/arch/arm/include/asm/uaccess.h
2162 @@ -18,6 +18,7 @@
2163 #include <asm/domain.h>
2164 #include <asm/unified.h>
2165 #include <asm/compiler.h>
2166 +#include <asm/pgtable.h>
2167
2168 #if __LINUX_ARM_ARCH__ < 6
2169 #include <asm-generic/uaccess-unaligned.h>
2170 @@ -70,11 +71,38 @@ extern int __put_user_bad(void);
2171 static inline void set_fs(mm_segment_t fs)
2172 {
2173 current_thread_info()->addr_limit = fs;
2174 - modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2175 + modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2176 }
2177
2178 #define segment_eq(a,b) ((a) == (b))
2179
2180 +#define __HAVE_ARCH_PAX_OPEN_USERLAND
2181 +#define __HAVE_ARCH_PAX_CLOSE_USERLAND
2182 +
2183 +static inline void pax_open_userland(void)
2184 +{
2185 +
2186 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2187 + if (segment_eq(get_fs(), USER_DS)) {
2188 + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2189 + modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2190 + }
2191 +#endif
2192 +
2193 +}
2194 +
2195 +static inline void pax_close_userland(void)
2196 +{
2197 +
2198 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2199 + if (segment_eq(get_fs(), USER_DS)) {
2200 + BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2201 + modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2202 + }
2203 +#endif
2204 +
2205 +}
2206 +
2207 #define __addr_ok(addr) ({ \
2208 unsigned long flag; \
2209 __asm__("cmp %2, %0; movlo %0, #0" \
2210 @@ -150,8 +178,12 @@ extern int __get_user_4(void *);
2211
2212 #define get_user(x,p) \
2213 ({ \
2214 + int __e; \
2215 might_fault(); \
2216 - __get_user_check(x,p); \
2217 + pax_open_userland(); \
2218 + __e = __get_user_check(x,p); \
2219 + pax_close_userland(); \
2220 + __e; \
2221 })
2222
2223 extern int __put_user_1(void *, unsigned int);
2224 @@ -195,8 +227,12 @@ extern int __put_user_8(void *, unsigned long long);
2225
2226 #define put_user(x,p) \
2227 ({ \
2228 + int __e; \
2229 might_fault(); \
2230 - __put_user_check(x,p); \
2231 + pax_open_userland(); \
2232 + __e = __put_user_check(x,p); \
2233 + pax_close_userland(); \
2234 + __e; \
2235 })
2236
2237 #else /* CONFIG_MMU */
2238 @@ -237,13 +273,17 @@ static inline void set_fs(mm_segment_t fs)
2239 #define __get_user(x,ptr) \
2240 ({ \
2241 long __gu_err = 0; \
2242 + pax_open_userland(); \
2243 __get_user_err((x),(ptr),__gu_err); \
2244 + pax_close_userland(); \
2245 __gu_err; \
2246 })
2247
2248 #define __get_user_error(x,ptr,err) \
2249 ({ \
2250 + pax_open_userland(); \
2251 __get_user_err((x),(ptr),err); \
2252 + pax_close_userland(); \
2253 (void) 0; \
2254 })
2255
2256 @@ -319,13 +359,17 @@ do { \
2257 #define __put_user(x,ptr) \
2258 ({ \
2259 long __pu_err = 0; \
2260 + pax_open_userland(); \
2261 __put_user_err((x),(ptr),__pu_err); \
2262 + pax_close_userland(); \
2263 __pu_err; \
2264 })
2265
2266 #define __put_user_error(x,ptr,err) \
2267 ({ \
2268 + pax_open_userland(); \
2269 __put_user_err((x),(ptr),err); \
2270 + pax_close_userland(); \
2271 (void) 0; \
2272 })
2273
2274 @@ -425,11 +469,44 @@ do { \
2275
2276
2277 #ifdef CONFIG_MMU
2278 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2279 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2280 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2281 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2282 +
2283 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2284 +{
2285 + unsigned long ret;
2286 +
2287 + check_object_size(to, n, false);
2288 + pax_open_userland();
2289 + ret = ___copy_from_user(to, from, n);
2290 + pax_close_userland();
2291 + return ret;
2292 +}
2293 +
2294 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2295 +{
2296 + unsigned long ret;
2297 +
2298 + check_object_size(from, n, true);
2299 + pax_open_userland();
2300 + ret = ___copy_to_user(to, from, n);
2301 + pax_close_userland();
2302 + return ret;
2303 +}
2304 +
2305 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2306 -extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2307 +extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2308 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2309 +
2310 +static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2311 +{
2312 + unsigned long ret;
2313 + pax_open_userland();
2314 + ret = ___clear_user(addr, n);
2315 + pax_close_userland();
2316 + return ret;
2317 +}
2318 +
2319 #else
2320 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2321 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2322 @@ -438,6 +515,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2323
2324 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2325 {
2326 + if ((long)n < 0)
2327 + return n;
2328 +
2329 if (access_ok(VERIFY_READ, from, n))
2330 n = __copy_from_user(to, from, n);
2331 else /* security hole - plug it */
2332 @@ -447,6 +527,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2333
2334 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2335 {
2336 + if ((long)n < 0)
2337 + return n;
2338 +
2339 if (access_ok(VERIFY_WRITE, to, n))
2340 n = __copy_to_user(to, from, n);
2341 return n;
2342 diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2343 index 5af0ed1..cea83883 100644
2344 --- a/arch/arm/include/uapi/asm/ptrace.h
2345 +++ b/arch/arm/include/uapi/asm/ptrace.h
2346 @@ -92,7 +92,7 @@
2347 * ARMv7 groups of PSR bits
2348 */
2349 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2350 -#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2351 +#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2352 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2353 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2354
2355 diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2356 index 60d3b73..e5a0f22 100644
2357 --- a/arch/arm/kernel/armksyms.c
2358 +++ b/arch/arm/kernel/armksyms.c
2359 @@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2360
2361 /* networking */
2362 EXPORT_SYMBOL(csum_partial);
2363 -EXPORT_SYMBOL(csum_partial_copy_from_user);
2364 +EXPORT_SYMBOL(__csum_partial_copy_from_user);
2365 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2366 EXPORT_SYMBOL(__csum_ipv6_magic);
2367
2368 @@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2369 #ifdef CONFIG_MMU
2370 EXPORT_SYMBOL(copy_page);
2371
2372 -EXPORT_SYMBOL(__copy_from_user);
2373 -EXPORT_SYMBOL(__copy_to_user);
2374 -EXPORT_SYMBOL(__clear_user);
2375 +EXPORT_SYMBOL(___copy_from_user);
2376 +EXPORT_SYMBOL(___copy_to_user);
2377 +EXPORT_SYMBOL(___clear_user);
2378
2379 EXPORT_SYMBOL(__get_user_1);
2380 EXPORT_SYMBOL(__get_user_2);
2381 diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2382 index ec3e5cf..b450ee3 100644
2383 --- a/arch/arm/kernel/entry-armv.S
2384 +++ b/arch/arm/kernel/entry-armv.S
2385 @@ -47,6 +47,87 @@
2386 9997:
2387 .endm
2388
2389 + .macro pax_enter_kernel
2390 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2391 + @ make aligned space for saved DACR
2392 + sub sp, sp, #8
2393 + @ save regs
2394 + stmdb sp!, {r1, r2}
2395 + @ read DACR from cpu_domain into r1
2396 + mov r2, sp
2397 + @ assume 8K pages, since we have to split the immediate in two
2398 + bic r2, r2, #(0x1fc0)
2399 + bic r2, r2, #(0x3f)
2400 + ldr r1, [r2, #TI_CPU_DOMAIN]
2401 + @ store old DACR on stack
2402 + str r1, [sp, #8]
2403 +#ifdef CONFIG_PAX_KERNEXEC
2404 + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2405 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2406 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2407 +#endif
2408 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2409 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2410 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2411 +#endif
2412 + @ write r1 to current_thread_info()->cpu_domain
2413 + str r1, [r2, #TI_CPU_DOMAIN]
2414 + @ write r1 to DACR
2415 + mcr p15, 0, r1, c3, c0, 0
2416 + @ instruction sync
2417 + instr_sync
2418 + @ restore regs
2419 + ldmia sp!, {r1, r2}
2420 +#endif
2421 + .endm
2422 +
2423 + .macro pax_open_userland
2424 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2425 + @ save regs
2426 + stmdb sp!, {r0, r1}
2427 + @ read DACR from cpu_domain into r1
2428 + mov r0, sp
2429 + @ assume 8K pages, since we have to split the immediate in two
2430 + bic r0, r0, #(0x1fc0)
2431 + bic r0, r0, #(0x3f)
2432 + ldr r1, [r0, #TI_CPU_DOMAIN]
2433 + @ set current DOMAIN_USER to DOMAIN_CLIENT
2434 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2435 + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2436 + @ write r1 to current_thread_info()->cpu_domain
2437 + str r1, [r0, #TI_CPU_DOMAIN]
2438 + @ write r1 to DACR
2439 + mcr p15, 0, r1, c3, c0, 0
2440 + @ instruction sync
2441 + instr_sync
2442 + @ restore regs
2443 + ldmia sp!, {r0, r1}
2444 +#endif
2445 + .endm
2446 +
2447 + .macro pax_close_userland
2448 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2449 + @ save regs
2450 + stmdb sp!, {r0, r1}
2451 + @ read DACR from cpu_domain into r1
2452 + mov r0, sp
2453 + @ assume 8K pages, since we have to split the immediate in two
2454 + bic r0, r0, #(0x1fc0)
2455 + bic r0, r0, #(0x3f)
2456 + ldr r1, [r0, #TI_CPU_DOMAIN]
2457 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2458 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2459 + @ write r1 to current_thread_info()->cpu_domain
2460 + str r1, [r0, #TI_CPU_DOMAIN]
2461 + @ write r1 to DACR
2462 + mcr p15, 0, r1, c3, c0, 0
2463 + @ instruction sync
2464 + instr_sync
2465 + @ restore regs
2466 + ldmia sp!, {r0, r1}
2467 +#endif
2468 + .endm
2469 +
2470 .macro pabt_helper
2471 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2472 #ifdef MULTI_PABORT
2473 @@ -89,11 +170,15 @@
2474 * Invalid mode handlers
2475 */
2476 .macro inv_entry, reason
2477 +
2478 + pax_enter_kernel
2479 +
2480 sub sp, sp, #S_FRAME_SIZE
2481 ARM( stmib sp, {r1 - lr} )
2482 THUMB( stmia sp, {r0 - r12} )
2483 THUMB( str sp, [sp, #S_SP] )
2484 THUMB( str lr, [sp, #S_LR] )
2485 +
2486 mov r1, #\reason
2487 .endm
2488
2489 @@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2490 .macro svc_entry, stack_hole=0
2491 UNWIND(.fnstart )
2492 UNWIND(.save {r0 - pc} )
2493 +
2494 + pax_enter_kernel
2495 +
2496 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2497 +
2498 #ifdef CONFIG_THUMB2_KERNEL
2499 SPFIX( str r0, [sp] ) @ temporarily saved
2500 SPFIX( mov r0, sp )
2501 @@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2502 ldmia r0, {r3 - r5}
2503 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2504 mov r6, #-1 @ "" "" "" ""
2505 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2506 + @ offset sp by 8 as done in pax_enter_kernel
2507 + add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2508 +#else
2509 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2510 +#endif
2511 SPFIX( addeq r2, r2, #4 )
2512 str r3, [sp, #-4]! @ save the "real" r0 copied
2513 @ from the exception stack
2514 @@ -317,6 +411,9 @@ ENDPROC(__pabt_svc)
2515 .macro usr_entry
2516 UNWIND(.fnstart )
2517 UNWIND(.cantunwind ) @ don't unwind the user space
2518 +
2519 + pax_enter_kernel_user
2520 +
2521 sub sp, sp, #S_FRAME_SIZE
2522 ARM( stmib sp, {r1 - r12} )
2523 THUMB( stmia sp, {r0 - r12} )
2524 @@ -416,7 +513,9 @@ __und_usr:
2525 tst r3, #PSR_T_BIT @ Thumb mode?
2526 bne __und_usr_thumb
2527 sub r4, r2, #4 @ ARM instr at LR - 4
2528 + pax_open_userland
2529 1: ldrt r0, [r4]
2530 + pax_close_userland
2531 #ifdef CONFIG_CPU_ENDIAN_BE8
2532 rev r0, r0 @ little endian instruction
2533 #endif
2534 @@ -451,10 +550,14 @@ __und_usr_thumb:
2535 */
2536 .arch armv6t2
2537 #endif
2538 + pax_open_userland
2539 2: ldrht r5, [r4]
2540 + pax_close_userland
2541 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2542 blo __und_usr_fault_16 @ 16bit undefined instruction
2543 + pax_open_userland
2544 3: ldrht r0, [r2]
2545 + pax_close_userland
2546 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2547 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2548 orr r0, r0, r5, lsl #16
2549 @@ -483,7 +586,8 @@ ENDPROC(__und_usr)
2550 */
2551 .pushsection .fixup, "ax"
2552 .align 2
2553 -4: mov pc, r9
2554 +4: pax_close_userland
2555 + mov pc, r9
2556 .popsection
2557 .pushsection __ex_table,"a"
2558 .long 1b, 4b
2559 @@ -693,7 +797,7 @@ ENTRY(__switch_to)
2560 THUMB( str lr, [ip], #4 )
2561 ldr r4, [r2, #TI_TP_VALUE]
2562 ldr r5, [r2, #TI_TP_VALUE + 4]
2563 -#ifdef CONFIG_CPU_USE_DOMAINS
2564 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2565 ldr r6, [r2, #TI_CPU_DOMAIN]
2566 #endif
2567 switch_tls r1, r4, r5, r3, r7
2568 @@ -702,7 +806,7 @@ ENTRY(__switch_to)
2569 ldr r8, =__stack_chk_guard
2570 ldr r7, [r7, #TSK_STACK_CANARY]
2571 #endif
2572 -#ifdef CONFIG_CPU_USE_DOMAINS
2573 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2574 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2575 #endif
2576 mov r5, r0
2577 diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2578 index bc6bd96..bd026cb 100644
2579 --- a/arch/arm/kernel/entry-common.S
2580 +++ b/arch/arm/kernel/entry-common.S
2581 @@ -10,18 +10,46 @@
2582
2583 #include <asm/unistd.h>
2584 #include <asm/ftrace.h>
2585 +#include <asm/domain.h>
2586 #include <asm/unwind.h>
2587
2588 +#include "entry-header.S"
2589 +
2590 #ifdef CONFIG_NEED_RET_TO_USER
2591 #include <mach/entry-macro.S>
2592 #else
2593 .macro arch_ret_to_user, tmp1, tmp2
2594 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2595 + @ save regs
2596 + stmdb sp!, {r1, r2}
2597 + @ read DACR from cpu_domain into r1
2598 + mov r2, sp
2599 + @ assume 8K pages, since we have to split the immediate in two
2600 + bic r2, r2, #(0x1fc0)
2601 + bic r2, r2, #(0x3f)
2602 + ldr r1, [r2, #TI_CPU_DOMAIN]
2603 +#ifdef CONFIG_PAX_KERNEXEC
2604 + @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2605 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2606 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2607 +#endif
2608 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2609 + @ set current DOMAIN_USER to DOMAIN_UDEREF
2610 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2611 + orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2612 +#endif
2613 + @ write r1 to current_thread_info()->cpu_domain
2614 + str r1, [r2, #TI_CPU_DOMAIN]
2615 + @ write r1 to DACR
2616 + mcr p15, 0, r1, c3, c0, 0
2617 + @ instruction sync
2618 + instr_sync
2619 + @ restore regs
2620 + ldmia sp!, {r1, r2}
2621 +#endif
2622 .endm
2623 #endif
2624
2625 -#include "entry-header.S"
2626 -
2627 -
2628 .align 5
2629 /*
2630 * This is the fast syscall return path. We do as little as
2631 @@ -413,6 +441,12 @@ ENTRY(vector_swi)
2632 USER( ldr scno, [lr, #-4] ) @ get SWI instruction
2633 #endif
2634
2635 + /*
2636 + * do this here to avoid a performance hit of wrapping the code above
2637 + * that directly dereferences userland to parse the SWI instruction
2638 + */
2639 + pax_enter_kernel_user
2640 +
2641 adr tbl, sys_call_table @ load syscall table pointer
2642
2643 #if defined(CONFIG_OABI_COMPAT)
2644 diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2645 index 39f89fb..d612bd9 100644
2646 --- a/arch/arm/kernel/entry-header.S
2647 +++ b/arch/arm/kernel/entry-header.S
2648 @@ -184,6 +184,60 @@
2649 msr cpsr_c, \rtemp @ switch back to the SVC mode
2650 .endm
2651
2652 + .macro pax_enter_kernel_user
2653 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2654 + @ save regs
2655 + stmdb sp!, {r0, r1}
2656 + @ read DACR from cpu_domain into r1
2657 + mov r0, sp
2658 + @ assume 8K pages, since we have to split the immediate in two
2659 + bic r0, r0, #(0x1fc0)
2660 + bic r0, r0, #(0x3f)
2661 + ldr r1, [r0, #TI_CPU_DOMAIN]
2662 +#ifdef CONFIG_PAX_MEMORY_UDEREF
2663 + @ set current DOMAIN_USER to DOMAIN_NOACCESS
2664 + bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2665 +#endif
2666 +#ifdef CONFIG_PAX_KERNEXEC
2667 + @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2668 + bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2669 + orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2670 +#endif
2671 + @ write r1 to current_thread_info()->cpu_domain
2672 + str r1, [r0, #TI_CPU_DOMAIN]
2673 + @ write r1 to DACR
2674 + mcr p15, 0, r1, c3, c0, 0
2675 + @ instruction sync
2676 + instr_sync
2677 + @ restore regs
2678 + ldmia sp!, {r0, r1}
2679 +#endif
2680 + .endm
2681 +
2682 + .macro pax_exit_kernel
2683 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2684 + @ save regs
2685 + stmdb sp!, {r0, r1}
2686 + @ read old DACR from stack into r1
2687 + ldr r1, [sp, #(8 + S_SP)]
2688 + sub r1, r1, #8
2689 + ldr r1, [r1]
2690 +
2691 + @ write r1 to current_thread_info()->cpu_domain
2692 + mov r0, sp
2693 + @ assume 8K pages, since we have to split the immediate in two
2694 + bic r0, r0, #(0x1fc0)
2695 + bic r0, r0, #(0x3f)
2696 + str r1, [r0, #TI_CPU_DOMAIN]
2697 + @ write r1 to DACR
2698 + mcr p15, 0, r1, c3, c0, 0
2699 + @ instruction sync
2700 + instr_sync
2701 + @ restore regs
2702 + ldmia sp!, {r0, r1}
2703 +#endif
2704 + .endm
2705 +
2706 #ifndef CONFIG_THUMB2_KERNEL
2707 .macro svc_exit, rpsr, irq = 0
2708 .if \irq != 0
2709 @@ -203,6 +257,9 @@
2710 blne trace_hardirqs_off
2711 #endif
2712 .endif
2713 +
2714 + pax_exit_kernel
2715 +
2716 msr spsr_cxsf, \rpsr
2717 #if defined(CONFIG_CPU_V6)
2718 ldr r0, [sp]
2719 @@ -266,6 +323,9 @@
2720 blne trace_hardirqs_off
2721 #endif
2722 .endif
2723 +
2724 + pax_exit_kernel
2725 +
2726 ldr lr, [sp, #S_SP] @ top of the stack
2727 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2728 clrex @ clear the exclusive monitor
2729 diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2730 index 918875d..cd5fa27 100644
2731 --- a/arch/arm/kernel/fiq.c
2732 +++ b/arch/arm/kernel/fiq.c
2733 @@ -87,7 +87,10 @@ void set_fiq_handler(void *start, unsigned int length)
2734 void *base = vectors_page;
2735 unsigned offset = FIQ_OFFSET;
2736
2737 + pax_open_kernel();
2738 memcpy(base + offset, start, length);
2739 + pax_close_kernel();
2740 +
2741 if (!cache_is_vipt_nonaliasing())
2742 flush_icache_range((unsigned long)base + offset, offset +
2743 length);
2744 diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2745 index 476de57..4857a76 100644
2746 --- a/arch/arm/kernel/head.S
2747 +++ b/arch/arm/kernel/head.S
2748 @@ -52,7 +52,9 @@
2749 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2750
2751 .macro pgtbl, rd, phys
2752 - add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2753 + mov \rd, #TEXT_OFFSET
2754 + sub \rd, #PG_DIR_SIZE
2755 + add \rd, \rd, \phys
2756 .endm
2757
2758 /*
2759 @@ -432,7 +434,7 @@ __enable_mmu:
2760 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2761 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2762 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2763 - domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2764 + domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2765 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2766 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2767 #endif
2768 diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2769 index 084dc88..fce4e68 100644
2770 --- a/arch/arm/kernel/module.c
2771 +++ b/arch/arm/kernel/module.c
2772 @@ -37,12 +37,39 @@
2773 #endif
2774
2775 #ifdef CONFIG_MMU
2776 -void *module_alloc(unsigned long size)
2777 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2778 {
2779 + if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2780 + return NULL;
2781 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2782 - GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2783 + GFP_KERNEL, prot, -1,
2784 __builtin_return_address(0));
2785 }
2786 +
2787 +void *module_alloc(unsigned long size)
2788 +{
2789 +
2790 +#ifdef CONFIG_PAX_KERNEXEC
2791 + return __module_alloc(size, PAGE_KERNEL);
2792 +#else
2793 + return __module_alloc(size, PAGE_KERNEL_EXEC);
2794 +#endif
2795 +
2796 +}
2797 +
2798 +#ifdef CONFIG_PAX_KERNEXEC
2799 +void module_free_exec(struct module *mod, void *module_region)
2800 +{
2801 + module_free(mod, module_region);
2802 +}
2803 +EXPORT_SYMBOL(module_free_exec);
2804 +
2805 +void *module_alloc_exec(unsigned long size)
2806 +{
2807 + return __module_alloc(size, PAGE_KERNEL_EXEC);
2808 +}
2809 +EXPORT_SYMBOL(module_alloc_exec);
2810 +#endif
2811 #endif
2812
2813 int
2814 diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2815 index 07314af..c46655c 100644
2816 --- a/arch/arm/kernel/patch.c
2817 +++ b/arch/arm/kernel/patch.c
2818 @@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2819 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2820 int size;
2821
2822 + pax_open_kernel();
2823 if (thumb2 && __opcode_is_thumb16(insn)) {
2824 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2825 size = sizeof(u16);
2826 @@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2827 *(u32 *)addr = insn;
2828 size = sizeof(u32);
2829 }
2830 + pax_close_kernel();
2831
2832 flush_icache_range((uintptr_t)(addr),
2833 (uintptr_t)(addr) + size);
2834 diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2835 index 92f7b15..7048500 100644
2836 --- a/arch/arm/kernel/process.c
2837 +++ b/arch/arm/kernel/process.c
2838 @@ -217,6 +217,7 @@ void machine_power_off(void)
2839
2840 if (pm_power_off)
2841 pm_power_off();
2842 + BUG();
2843 }
2844
2845 /*
2846 @@ -230,7 +231,7 @@ void machine_power_off(void)
2847 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2848 * to use. Implementing such co-ordination would be essentially impossible.
2849 */
2850 -void machine_restart(char *cmd)
2851 +__noreturn void machine_restart(char *cmd)
2852 {
2853 local_irq_disable();
2854 smp_send_stop();
2855 @@ -253,8 +254,8 @@ void __show_regs(struct pt_regs *regs)
2856
2857 show_regs_print_info(KERN_DEFAULT);
2858
2859 - print_symbol("PC is at %s\n", instruction_pointer(regs));
2860 - print_symbol("LR is at %s\n", regs->ARM_lr);
2861 + printk("PC is at %pA\n", (void *)instruction_pointer(regs));
2862 + printk("LR is at %pA\n", (void *)regs->ARM_lr);
2863 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2864 "sp : %08lx ip : %08lx fp : %08lx\n",
2865 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2866 @@ -425,12 +426,6 @@ unsigned long get_wchan(struct task_struct *p)
2867 return 0;
2868 }
2869
2870 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2871 -{
2872 - unsigned long range_end = mm->brk + 0x02000000;
2873 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2874 -}
2875 -
2876 #ifdef CONFIG_MMU
2877 #ifdef CONFIG_KUSER_HELPERS
2878 /*
2879 @@ -446,7 +441,7 @@ static struct vm_area_struct gate_vma = {
2880
2881 static int __init gate_vma_init(void)
2882 {
2883 - gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2884 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2885 return 0;
2886 }
2887 arch_initcall(gate_vma_init);
2888 @@ -472,41 +467,16 @@ int in_gate_area_no_mm(unsigned long addr)
2889
2890 const char *arch_vma_name(struct vm_area_struct *vma)
2891 {
2892 - return is_gate_vma(vma) ? "[vectors]" :
2893 - (vma->vm_mm && vma->vm_start == vma->vm_mm->context.sigpage) ?
2894 - "[sigpage]" : NULL;
2895 + return is_gate_vma(vma) ? "[vectors]" : NULL;
2896 }
2897
2898 -static struct page *signal_page;
2899 -extern struct page *get_signal_page(void);
2900 -
2901 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
2902 {
2903 struct mm_struct *mm = current->mm;
2904 - unsigned long addr;
2905 - int ret;
2906 -
2907 - if (!signal_page)
2908 - signal_page = get_signal_page();
2909 - if (!signal_page)
2910 - return -ENOMEM;
2911
2912 down_write(&mm->mmap_sem);
2913 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
2914 - if (IS_ERR_VALUE(addr)) {
2915 - ret = addr;
2916 - goto up_fail;
2917 - }
2918 -
2919 - ret = install_special_mapping(mm, addr, PAGE_SIZE,
2920 - VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
2921 - &signal_page);
2922 -
2923 - if (ret == 0)
2924 - mm->context.sigpage = addr;
2925 -
2926 - up_fail:
2927 + mm->context.sigpage = (PAGE_OFFSET + (get_random_int() % 0x3FFEFFE0)) & 0xFFFFFFFC;
2928 up_write(&mm->mmap_sem);
2929 - return ret;
2930 + return 0;
2931 }
2932 #endif
2933 diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2934 index 4693188..4596c5e 100644
2935 --- a/arch/arm/kernel/psci.c
2936 +++ b/arch/arm/kernel/psci.c
2937 @@ -24,7 +24,7 @@
2938 #include <asm/opcodes-virt.h>
2939 #include <asm/psci.h>
2940
2941 -struct psci_operations psci_ops;
2942 +struct psci_operations psci_ops __read_only;
2943
2944 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2945
2946 diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2947 index 0dd3b79..e018f64 100644
2948 --- a/arch/arm/kernel/ptrace.c
2949 +++ b/arch/arm/kernel/ptrace.c
2950 @@ -929,10 +929,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2951 return current_thread_info()->syscall;
2952 }
2953
2954 +#ifdef CONFIG_GRKERNSEC_SETXID
2955 +extern void gr_delayed_cred_worker(void);
2956 +#endif
2957 +
2958 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2959 {
2960 current_thread_info()->syscall = scno;
2961
2962 +#ifdef CONFIG_GRKERNSEC_SETXID
2963 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2964 + gr_delayed_cred_worker();
2965 +#endif
2966 +
2967 /* Do the secure computing check first; failures should be fast. */
2968 if (secure_computing(scno) == -1)
2969 return -1;
2970 diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2971 index 0e1e2b3..c0e821d 100644
2972 --- a/arch/arm/kernel/setup.c
2973 +++ b/arch/arm/kernel/setup.c
2974 @@ -98,21 +98,23 @@ EXPORT_SYMBOL(system_serial_high);
2975 unsigned int elf_hwcap __read_mostly;
2976 EXPORT_SYMBOL(elf_hwcap);
2977
2978 +pteval_t __supported_pte_mask __read_only;
2979 +pmdval_t __supported_pmd_mask __read_only;
2980
2981 #ifdef MULTI_CPU
2982 -struct processor processor __read_mostly;
2983 +struct processor processor;
2984 #endif
2985 #ifdef MULTI_TLB
2986 -struct cpu_tlb_fns cpu_tlb __read_mostly;
2987 +struct cpu_tlb_fns cpu_tlb __read_only;
2988 #endif
2989 #ifdef MULTI_USER
2990 -struct cpu_user_fns cpu_user __read_mostly;
2991 +struct cpu_user_fns cpu_user __read_only;
2992 #endif
2993 #ifdef MULTI_CACHE
2994 -struct cpu_cache_fns cpu_cache __read_mostly;
2995 +struct cpu_cache_fns cpu_cache __read_only;
2996 #endif
2997 #ifdef CONFIG_OUTER_CACHE
2998 -struct outer_cache_fns outer_cache __read_mostly;
2999 +struct outer_cache_fns outer_cache __read_only;
3000 EXPORT_SYMBOL(outer_cache);
3001 #endif
3002
3003 @@ -245,9 +247,13 @@ static int __get_cpu_architecture(void)
3004 asm("mrc p15, 0, %0, c0, c1, 4"
3005 : "=r" (mmfr0));
3006 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
3007 - (mmfr0 & 0x000000f0) >= 0x00000030)
3008 + (mmfr0 & 0x000000f0) >= 0x00000030) {
3009 cpu_arch = CPU_ARCH_ARMv7;
3010 - else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3011 + if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
3012 + __supported_pte_mask |= L_PTE_PXN;
3013 + __supported_pmd_mask |= PMD_PXNTABLE;
3014 + }
3015 + } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
3016 (mmfr0 & 0x000000f0) == 0x00000020)
3017 cpu_arch = CPU_ARCH_ARMv6;
3018 else
3019 @@ -571,7 +577,7 @@ static void __init setup_processor(void)
3020 __cpu_architecture = __get_cpu_architecture();
3021
3022 #ifdef MULTI_CPU
3023 - processor = *list->proc;
3024 + memcpy((void *)&processor, list->proc, sizeof processor);
3025 #endif
3026 #ifdef MULTI_TLB
3027 cpu_tlb = *list->tlb;
3028 diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3029 index ab33042..11248a8 100644
3030 --- a/arch/arm/kernel/signal.c
3031 +++ b/arch/arm/kernel/signal.c
3032 @@ -45,8 +45,6 @@ static const unsigned long sigreturn_codes[7] = {
3033 MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
3034 };
3035
3036 -static unsigned long signal_return_offset;
3037 -
3038 #ifdef CONFIG_CRUNCH
3039 static int preserve_crunch_context(struct crunch_sigframe __user *frame)
3040 {
3041 @@ -411,8 +409,7 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3042 * except when the MPU has protected the vectors
3043 * page from PL0
3044 */
3045 - retcode = mm->context.sigpage + signal_return_offset +
3046 - (idx << 2) + thumb;
3047 + retcode = mm->context.sigpage + (idx << 2) + thumb;
3048 } else
3049 #endif
3050 {
3051 @@ -616,33 +613,3 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
3052 } while (thread_flags & _TIF_WORK_MASK);
3053 return 0;
3054 }
3055 -
3056 -struct page *get_signal_page(void)
3057 -{
3058 - unsigned long ptr;
3059 - unsigned offset;
3060 - struct page *page;
3061 - void *addr;
3062 -
3063 - page = alloc_pages(GFP_KERNEL, 0);
3064 -
3065 - if (!page)
3066 - return NULL;
3067 -
3068 - addr = page_address(page);
3069 -
3070 - /* Give the signal return code some randomness */
3071 - offset = 0x200 + (get_random_int() & 0x7fc);
3072 - signal_return_offset = offset;
3073 -
3074 - /*
3075 - * Copy signal return handlers into the vector page, and
3076 - * set sigreturn to be a pointer to these.
3077 - */
3078 - memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
3079 -
3080 - ptr = (unsigned long)addr + offset;
3081 - flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
3082 -
3083 - return page;
3084 -}
3085 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3086 index 72024ea..ae302dd 100644
3087 --- a/arch/arm/kernel/smp.c
3088 +++ b/arch/arm/kernel/smp.c
3089 @@ -70,7 +70,7 @@ enum ipi_msg_type {
3090
3091 static DECLARE_COMPLETION(cpu_running);
3092
3093 -static struct smp_operations smp_ops;
3094 +static struct smp_operations smp_ops __read_only;
3095
3096 void __init smp_set_ops(struct smp_operations *ops)
3097 {
3098 diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3099 index 65ed63f..430c478 100644
3100 --- a/arch/arm/kernel/traps.c
3101 +++ b/arch/arm/kernel/traps.c
3102 @@ -55,7 +55,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3103 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3104 {
3105 #ifdef CONFIG_KALLSYMS
3106 - printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3107 + printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3108 #else
3109 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3110 #endif
3111 @@ -257,6 +257,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3112 static int die_owner = -1;
3113 static unsigned int die_nest_count;
3114
3115 +extern void gr_handle_kernel_exploit(void);
3116 +
3117 static unsigned long oops_begin(void)
3118 {
3119 int cpu;
3120 @@ -299,6 +301,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3121 panic("Fatal exception in interrupt");
3122 if (panic_on_oops)
3123 panic("Fatal exception");
3124 +
3125 + gr_handle_kernel_exploit();
3126 +
3127 if (signr)
3128 do_exit(signr);
3129 }
3130 @@ -629,7 +634,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3131 * The user helper at 0xffff0fe0 must be used instead.
3132 * (see entry-armv.S for details)
3133 */
3134 + pax_open_kernel();
3135 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3136 + pax_close_kernel();
3137 }
3138 return 0;
3139
3140 @@ -886,7 +893,11 @@ void __init early_trap_init(void *vectors_base)
3141 kuser_init(vectors_base);
3142
3143 flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
3144 - modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3145 +
3146 +#ifndef CONFIG_PAX_MEMORY_UDEREF
3147 + modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3148 +#endif
3149 +
3150 #else /* ifndef CONFIG_CPU_V7M */
3151 /*
3152 * on V7-M there is no need to copy the vector table to a dedicated
3153 diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3154 index 7bcee5c..e2f3249 100644
3155 --- a/arch/arm/kernel/vmlinux.lds.S
3156 +++ b/arch/arm/kernel/vmlinux.lds.S
3157 @@ -8,7 +8,11 @@
3158 #include <asm/thread_info.h>
3159 #include <asm/memory.h>
3160 #include <asm/page.h>
3161 -
3162 +
3163 +#ifdef CONFIG_PAX_KERNEXEC
3164 +#include <asm/pgtable.h>
3165 +#endif
3166 +
3167 #define PROC_INFO \
3168 . = ALIGN(4); \
3169 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3170 @@ -34,7 +38,7 @@
3171 #endif
3172
3173 #if (defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)) || \
3174 - defined(CONFIG_GENERIC_BUG)
3175 + defined(CONFIG_GENERIC_BUG) || defined(CONFIG_PAX_REFCOUNT)
3176 #define ARM_EXIT_KEEP(x) x
3177 #define ARM_EXIT_DISCARD(x)
3178 #else
3179 @@ -90,6 +94,11 @@ SECTIONS
3180 _text = .;
3181 HEAD_TEXT
3182 }
3183 +
3184 +#ifdef CONFIG_PAX_KERNEXEC
3185 + . = ALIGN(1<<SECTION_SHIFT);
3186 +#endif
3187 +
3188 .text : { /* Real text segment */
3189 _stext = .; /* Text and read-only data */
3190 __exception_text_start = .;
3191 @@ -112,6 +121,8 @@ SECTIONS
3192 ARM_CPU_KEEP(PROC_INFO)
3193 }
3194
3195 + _etext = .; /* End of text section */
3196 +
3197 RO_DATA(PAGE_SIZE)
3198
3199 . = ALIGN(4);
3200 @@ -142,7 +153,9 @@ SECTIONS
3201
3202 NOTES
3203
3204 - _etext = .; /* End of text and rodata section */
3205 +#ifdef CONFIG_PAX_KERNEXEC
3206 + . = ALIGN(1<<SECTION_SHIFT);
3207 +#endif
3208
3209 #ifndef CONFIG_XIP_KERNEL
3210 . = ALIGN(PAGE_SIZE);
3211 @@ -220,6 +233,11 @@ SECTIONS
3212 . = PAGE_OFFSET + TEXT_OFFSET;
3213 #else
3214 __init_end = .;
3215 +
3216 +#ifdef CONFIG_PAX_KERNEXEC
3217 + . = ALIGN(1<<SECTION_SHIFT);
3218 +#endif
3219 +
3220 . = ALIGN(THREAD_SIZE);
3221 __data_loc = .;
3222 #endif
3223 diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
3224 index 9c697db..115237f 100644
3225 --- a/arch/arm/kvm/arm.c
3226 +++ b/arch/arm/kvm/arm.c
3227 @@ -56,7 +56,7 @@ static unsigned long hyp_default_vectors;
3228 static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
3229
3230 /* The VMID used in the VTTBR */
3231 -static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
3232 +static atomic64_unchecked_t kvm_vmid_gen = ATOMIC64_INIT(1);
3233 static u8 kvm_next_vmid;
3234 static DEFINE_SPINLOCK(kvm_vmid_lock);
3235
3236 @@ -396,7 +396,7 @@ void force_vm_exit(const cpumask_t *mask)
3237 */
3238 static bool need_new_vmid_gen(struct kvm *kvm)
3239 {
3240 - return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
3241 + return unlikely(kvm->arch.vmid_gen != atomic64_read_unchecked(&kvm_vmid_gen));
3242 }
3243
3244 /**
3245 @@ -429,7 +429,7 @@ static void update_vttbr(struct kvm *kvm)
3246
3247 /* First user of a new VMID generation? */
3248 if (unlikely(kvm_next_vmid == 0)) {
3249 - atomic64_inc(&kvm_vmid_gen);
3250 + atomic64_inc_unchecked(&kvm_vmid_gen);
3251 kvm_next_vmid = 1;
3252
3253 /*
3254 @@ -446,7 +446,7 @@ static void update_vttbr(struct kvm *kvm)
3255 kvm_call_hyp(__kvm_flush_vm_context);
3256 }
3257
3258 - kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
3259 + kvm->arch.vmid_gen = atomic64_read_unchecked(&kvm_vmid_gen);
3260 kvm->arch.vmid = kvm_next_vmid;
3261 kvm_next_vmid++;
3262
3263 diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3264 index 14a0d98..7771a7d 100644
3265 --- a/arch/arm/lib/clear_user.S
3266 +++ b/arch/arm/lib/clear_user.S
3267 @@ -12,14 +12,14 @@
3268
3269 .text
3270
3271 -/* Prototype: int __clear_user(void *addr, size_t sz)
3272 +/* Prototype: int ___clear_user(void *addr, size_t sz)
3273 * Purpose : clear some user memory
3274 * Params : addr - user memory address to clear
3275 * : sz - number of bytes to clear
3276 * Returns : number of bytes NOT cleared
3277 */
3278 ENTRY(__clear_user_std)
3279 -WEAK(__clear_user)
3280 +WEAK(___clear_user)
3281 stmfd sp!, {r1, lr}
3282 mov r2, #0
3283 cmp r1, #4
3284 @@ -44,7 +44,7 @@ WEAK(__clear_user)
3285 USER( strnebt r2, [r0])
3286 mov r0, #0
3287 ldmfd sp!, {r1, pc}
3288 -ENDPROC(__clear_user)
3289 +ENDPROC(___clear_user)
3290 ENDPROC(__clear_user_std)
3291
3292 .pushsection .fixup,"ax"
3293 diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3294 index 66a477a..bee61d3 100644
3295 --- a/arch/arm/lib/copy_from_user.S
3296 +++ b/arch/arm/lib/copy_from_user.S
3297 @@ -16,7 +16,7 @@
3298 /*
3299 * Prototype:
3300 *
3301 - * size_t __copy_from_user(void *to, const void *from, size_t n)
3302 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
3303 *
3304 * Purpose:
3305 *
3306 @@ -84,11 +84,11 @@
3307
3308 .text
3309
3310 -ENTRY(__copy_from_user)
3311 +ENTRY(___copy_from_user)
3312
3313 #include "copy_template.S"
3314
3315 -ENDPROC(__copy_from_user)
3316 +ENDPROC(___copy_from_user)
3317
3318 .pushsection .fixup,"ax"
3319 .align 0
3320 diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3321 index 6ee2f67..d1cce76 100644
3322 --- a/arch/arm/lib/copy_page.S
3323 +++ b/arch/arm/lib/copy_page.S
3324 @@ -10,6 +10,7 @@
3325 * ASM optimised string functions
3326 */
3327 #include <linux/linkage.h>
3328 +#include <linux/const.h>
3329 #include <asm/assembler.h>
3330 #include <asm/asm-offsets.h>
3331 #include <asm/cache.h>
3332 diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3333 index d066df6..df28194 100644
3334 --- a/arch/arm/lib/copy_to_user.S
3335 +++ b/arch/arm/lib/copy_to_user.S
3336 @@ -16,7 +16,7 @@
3337 /*
3338 * Prototype:
3339 *
3340 - * size_t __copy_to_user(void *to, const void *from, size_t n)
3341 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
3342 *
3343 * Purpose:
3344 *
3345 @@ -88,11 +88,11 @@
3346 .text
3347
3348 ENTRY(__copy_to_user_std)
3349 -WEAK(__copy_to_user)
3350 +WEAK(___copy_to_user)
3351
3352 #include "copy_template.S"
3353
3354 -ENDPROC(__copy_to_user)
3355 +ENDPROC(___copy_to_user)
3356 ENDPROC(__copy_to_user_std)
3357
3358 .pushsection .fixup,"ax"
3359 diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3360 index 7d08b43..f7ca7ea 100644
3361 --- a/arch/arm/lib/csumpartialcopyuser.S
3362 +++ b/arch/arm/lib/csumpartialcopyuser.S
3363 @@ -57,8 +57,8 @@
3364 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3365 */
3366
3367 -#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3368 -#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3369 +#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3370 +#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3371
3372 #include "csumpartialcopygeneric.S"
3373
3374 diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3375 index 5306de3..aed6d03 100644
3376 --- a/arch/arm/lib/delay.c
3377 +++ b/arch/arm/lib/delay.c
3378 @@ -28,7 +28,7 @@
3379 /*
3380 * Default to the loop-based delay implementation.
3381 */
3382 -struct arm_delay_ops arm_delay_ops = {
3383 +struct arm_delay_ops arm_delay_ops __read_only = {
3384 .delay = __loop_delay,
3385 .const_udelay = __loop_const_udelay,
3386 .udelay = __loop_udelay,
3387 diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3388 index 025f742..a9e5b3b 100644
3389 --- a/arch/arm/lib/uaccess_with_memcpy.c
3390 +++ b/arch/arm/lib/uaccess_with_memcpy.c
3391 @@ -104,7 +104,7 @@ out:
3392 }
3393
3394 unsigned long
3395 -__copy_to_user(void __user *to, const void *from, unsigned long n)
3396 +___copy_to_user(void __user *to, const void *from, unsigned long n)
3397 {
3398 /*
3399 * This test is stubbed out of the main function above to keep
3400 @@ -155,7 +155,7 @@ out:
3401 return n;
3402 }
3403
3404 -unsigned long __clear_user(void __user *addr, unsigned long n)
3405 +unsigned long ___clear_user(void __user *addr, unsigned long n)
3406 {
3407 /* See rational for this in __copy_to_user() above. */
3408 if (n < 64)
3409 diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3410 index 1767611..d2e7e24 100644
3411 --- a/arch/arm/mach-kirkwood/common.c
3412 +++ b/arch/arm/mach-kirkwood/common.c
3413 @@ -156,7 +156,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3414 clk_gate_ops.disable(hw);
3415 }
3416
3417 -static struct clk_ops clk_gate_fn_ops;
3418 +static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3419 +{
3420 + return clk_gate_ops.is_enabled(hw);
3421 +}
3422 +
3423 +static struct clk_ops clk_gate_fn_ops = {
3424 + .enable = clk_gate_fn_enable,
3425 + .disable = clk_gate_fn_disable,
3426 + .is_enabled = clk_gate_fn_is_enabled,
3427 +};
3428
3429 static struct clk __init *clk_register_gate_fn(struct device *dev,
3430 const char *name,
3431 @@ -190,14 +199,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3432 gate_fn->fn_en = fn_en;
3433 gate_fn->fn_dis = fn_dis;
3434
3435 - /* ops is the gate ops, but with our enable/disable functions */
3436 - if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3437 - clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3438 - clk_gate_fn_ops = clk_gate_ops;
3439 - clk_gate_fn_ops.enable = clk_gate_fn_enable;
3440 - clk_gate_fn_ops.disable = clk_gate_fn_disable;
3441 - }
3442 -
3443 clk = clk_register(dev, &gate_fn->gate.hw);
3444
3445 if (IS_ERR(clk))
3446 diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3447 index 827d1500..2885dc6 100644
3448 --- a/arch/arm/mach-omap2/board-n8x0.c
3449 +++ b/arch/arm/mach-omap2/board-n8x0.c
3450 @@ -627,7 +627,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3451 }
3452 #endif
3453
3454 -static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3455 +static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3456 .late_init = n8x0_menelaus_late_init,
3457 };
3458
3459 diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3460 index 579697a..1d5a3b2 100644
3461 --- a/arch/arm/mach-omap2/gpmc.c
3462 +++ b/arch/arm/mach-omap2/gpmc.c
3463 @@ -148,7 +148,6 @@ struct omap3_gpmc_regs {
3464 };
3465
3466 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3467 -static struct irq_chip gpmc_irq_chip;
3468 static int gpmc_irq_start;
3469
3470 static struct resource gpmc_mem_root;
3471 @@ -716,6 +715,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3472
3473 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3474
3475 +static struct irq_chip gpmc_irq_chip = {
3476 + .name = "gpmc",
3477 + .irq_startup = gpmc_irq_noop_ret,
3478 + .irq_enable = gpmc_irq_enable,
3479 + .irq_disable = gpmc_irq_disable,
3480 + .irq_shutdown = gpmc_irq_noop,
3481 + .irq_ack = gpmc_irq_noop,
3482 + .irq_mask = gpmc_irq_noop,
3483 + .irq_unmask = gpmc_irq_noop,
3484 +
3485 +};
3486 +
3487 static int gpmc_setup_irq(void)
3488 {
3489 int i;
3490 @@ -730,15 +741,6 @@ static int gpmc_setup_irq(void)
3491 return gpmc_irq_start;
3492 }
3493
3494 - gpmc_irq_chip.name = "gpmc";
3495 - gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3496 - gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3497 - gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3498 - gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3499 - gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3500 - gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3501 - gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3502 -
3503 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3504 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3505
3506 diff --git a/arch/arm/mach-omap2/omap-mpuss-lowpower.c b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3507 index f991016..145ebeb 100644
3508 --- a/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3509 +++ b/arch/arm/mach-omap2/omap-mpuss-lowpower.c
3510 @@ -84,7 +84,7 @@ struct cpu_pm_ops {
3511 int (*finish_suspend)(unsigned long cpu_state);
3512 void (*resume)(void);
3513 void (*scu_prepare)(unsigned int cpu_id, unsigned int cpu_state);
3514 -};
3515 +} __no_const;
3516
3517 static DEFINE_PER_CPU(struct omap4_cpu_pm_info, omap4_pm_info);
3518 static struct powerdomain *mpuss_pd;
3519 @@ -102,7 +102,7 @@ static void dummy_cpu_resume(void)
3520 static void dummy_scu_prepare(unsigned int cpu_id, unsigned int cpu_state)
3521 {}
3522
3523 -struct cpu_pm_ops omap_pm_ops = {
3524 +static struct cpu_pm_ops omap_pm_ops __read_only = {
3525 .finish_suspend = default_finish_suspend,
3526 .resume = dummy_cpu_resume,
3527 .scu_prepare = dummy_scu_prepare,
3528 diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3529 index 813c615..ce467c6 100644
3530 --- a/arch/arm/mach-omap2/omap-wakeupgen.c
3531 +++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3532 @@ -339,7 +339,7 @@ static int irq_cpu_hotplug_notify(struct notifier_block *self,
3533 return NOTIFY_OK;
3534 }
3535
3536 -static struct notifier_block __refdata irq_hotplug_notifier = {
3537 +static struct notifier_block irq_hotplug_notifier = {
3538 .notifier_call = irq_cpu_hotplug_notify,
3539 };
3540
3541 diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3542 index 53f0735..5b54eb6 100644
3543 --- a/arch/arm/mach-omap2/omap_device.c
3544 +++ b/arch/arm/mach-omap2/omap_device.c
3545 @@ -504,7 +504,7 @@ void omap_device_delete(struct omap_device *od)
3546 struct platform_device __init *omap_device_build(const char *pdev_name,
3547 int pdev_id,
3548 struct omap_hwmod *oh,
3549 - void *pdata, int pdata_len)
3550 + const void *pdata, int pdata_len)
3551 {
3552 struct omap_hwmod *ohs[] = { oh };
3553
3554 @@ -532,7 +532,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3555 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3556 int pdev_id,
3557 struct omap_hwmod **ohs,
3558 - int oh_cnt, void *pdata,
3559 + int oh_cnt, const void *pdata,
3560 int pdata_len)
3561 {
3562 int ret = -ENOMEM;
3563 diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3564 index 17ca1ae..beba869 100644
3565 --- a/arch/arm/mach-omap2/omap_device.h
3566 +++ b/arch/arm/mach-omap2/omap_device.h
3567 @@ -71,12 +71,12 @@ int omap_device_idle(struct platform_device *pdev);
3568 /* Core code interface */
3569
3570 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3571 - struct omap_hwmod *oh, void *pdata,
3572 + struct omap_hwmod *oh, const void *pdata,
3573 int pdata_len);
3574
3575 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3576 struct omap_hwmod **oh, int oh_cnt,
3577 - void *pdata, int pdata_len);
3578 + const void *pdata, int pdata_len);
3579
3580 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3581 struct omap_hwmod **ohs, int oh_cnt);
3582 diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3583 index 832adb1..49b62c4 100644
3584 --- a/arch/arm/mach-omap2/omap_hwmod.c
3585 +++ b/arch/arm/mach-omap2/omap_hwmod.c
3586 @@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3587 int (*init_clkdm)(struct omap_hwmod *oh);
3588 void (*update_context_lost)(struct omap_hwmod *oh);
3589 int (*get_context_lost)(struct omap_hwmod *oh);
3590 -};
3591 +} __no_const;
3592
3593 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3594 -static struct omap_hwmod_soc_ops soc_ops;
3595 +static struct omap_hwmod_soc_ops soc_ops __read_only;
3596
3597 /* omap_hwmod_list contains all registered struct omap_hwmods */
3598 static LIST_HEAD(omap_hwmod_list);
3599 diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3600 index d15c7bb..b2d1f0c 100644
3601 --- a/arch/arm/mach-omap2/wd_timer.c
3602 +++ b/arch/arm/mach-omap2/wd_timer.c
3603 @@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3604 struct omap_hwmod *oh;
3605 char *oh_name = "wd_timer2";
3606 char *dev_name = "omap_wdt";
3607 - struct omap_wd_timer_platform_data pdata;
3608 + static struct omap_wd_timer_platform_data pdata = {
3609 + .read_reset_sources = prm_read_reset_sources
3610 + };
3611
3612 if (!cpu_class_is_omap2() || of_have_populated_dt())
3613 return 0;
3614 @@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3615 return -EINVAL;
3616 }
3617
3618 - pdata.read_reset_sources = prm_read_reset_sources;
3619 -
3620 pdev = omap_device_build(dev_name, id, oh, &pdata,
3621 sizeof(struct omap_wd_timer_platform_data));
3622 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3623 diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3624 index b82dcae..44ee5b6 100644
3625 --- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3626 +++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3627 @@ -180,7 +180,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3628 bool entered_lp2 = false;
3629
3630 if (tegra_pending_sgi())
3631 - ACCESS_ONCE(abort_flag) = true;
3632 + ACCESS_ONCE_RW(abort_flag) = true;
3633
3634 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3635
3636 diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3637 index 656324a..0beba28 100644
3638 --- a/arch/arm/mach-ux500/setup.h
3639 +++ b/arch/arm/mach-ux500/setup.h
3640 @@ -40,13 +40,6 @@ extern void ux500_timer_init(void);
3641 .type = MT_DEVICE, \
3642 }
3643
3644 -#define __MEM_DEV_DESC(x, sz) { \
3645 - .virtual = IO_ADDRESS(x), \
3646 - .pfn = __phys_to_pfn(x), \
3647 - .length = sz, \
3648 - .type = MT_MEMORY, \
3649 -}
3650 -
3651 extern struct smp_operations ux500_smp_ops;
3652 extern void ux500_cpu_die(unsigned int cpu);
3653
3654 diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3655 index cd2c88e..4dd9b67 100644
3656 --- a/arch/arm/mm/Kconfig
3657 +++ b/arch/arm/mm/Kconfig
3658 @@ -446,7 +446,7 @@ config CPU_32v5
3659
3660 config CPU_32v6
3661 bool
3662 - select CPU_USE_DOMAINS if CPU_V6 && MMU
3663 + select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3664 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3665
3666 config CPU_32v6K
3667 @@ -601,6 +601,7 @@ config CPU_CP15_MPU
3668
3669 config CPU_USE_DOMAINS
3670 bool
3671 + depends on !ARM_LPAE && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3672 help
3673 This option enables or disables the use of domain switching
3674 via the set_fs() function.
3675 @@ -800,6 +801,7 @@ config NEED_KUSER_HELPERS
3676 config KUSER_HELPERS
3677 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
3678 default y
3679 + depends on !(CPU_V6 || CPU_V6K || CPU_V7) || GRKERNSEC_OLD_ARM_USERLAND
3680 help
3681 Warning: disabling this option may break user programs.
3682
3683 @@ -812,7 +814,7 @@ config KUSER_HELPERS
3684 See Documentation/arm/kernel_user_helpers.txt for details.
3685
3686 However, the fixed address nature of these helpers can be used
3687 - by ROP (return orientated programming) authors when creating
3688 + by ROP (Return Oriented Programming) authors when creating
3689 exploits.
3690
3691 If all of the binaries and libraries which run on your platform
3692 diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3693 index 6f4585b..7b6f52b 100644
3694 --- a/arch/arm/mm/alignment.c
3695 +++ b/arch/arm/mm/alignment.c
3696 @@ -211,10 +211,12 @@ union offset_union {
3697 #define __get16_unaligned_check(ins,val,addr) \
3698 do { \
3699 unsigned int err = 0, v, a = addr; \
3700 + pax_open_userland(); \
3701 __get8_unaligned_check(ins,v,a,err); \
3702 val = v << ((BE) ? 8 : 0); \
3703 __get8_unaligned_check(ins,v,a,err); \
3704 val |= v << ((BE) ? 0 : 8); \
3705 + pax_close_userland(); \
3706 if (err) \
3707 goto fault; \
3708 } while (0)
3709 @@ -228,6 +230,7 @@ union offset_union {
3710 #define __get32_unaligned_check(ins,val,addr) \
3711 do { \
3712 unsigned int err = 0, v, a = addr; \
3713 + pax_open_userland(); \
3714 __get8_unaligned_check(ins,v,a,err); \
3715 val = v << ((BE) ? 24 : 0); \
3716 __get8_unaligned_check(ins,v,a,err); \
3717 @@ -236,6 +239,7 @@ union offset_union {
3718 val |= v << ((BE) ? 8 : 16); \
3719 __get8_unaligned_check(ins,v,a,err); \
3720 val |= v << ((BE) ? 0 : 24); \
3721 + pax_close_userland(); \
3722 if (err) \
3723 goto fault; \
3724 } while (0)
3725 @@ -249,6 +253,7 @@ union offset_union {
3726 #define __put16_unaligned_check(ins,val,addr) \
3727 do { \
3728 unsigned int err = 0, v = val, a = addr; \
3729 + pax_open_userland(); \
3730 __asm__( FIRST_BYTE_16 \
3731 ARM( "1: "ins" %1, [%2], #1\n" ) \
3732 THUMB( "1: "ins" %1, [%2]\n" ) \
3733 @@ -268,6 +273,7 @@ union offset_union {
3734 " .popsection\n" \
3735 : "=r" (err), "=&r" (v), "=&r" (a) \
3736 : "0" (err), "1" (v), "2" (a)); \
3737 + pax_close_userland(); \
3738 if (err) \
3739 goto fault; \
3740 } while (0)
3741 @@ -281,6 +287,7 @@ union offset_union {
3742 #define __put32_unaligned_check(ins,val,addr) \
3743 do { \
3744 unsigned int err = 0, v = val, a = addr; \
3745 + pax_open_userland(); \
3746 __asm__( FIRST_BYTE_32 \
3747 ARM( "1: "ins" %1, [%2], #1\n" ) \
3748 THUMB( "1: "ins" %1, [%2]\n" ) \
3749 @@ -310,6 +317,7 @@ union offset_union {
3750 " .popsection\n" \
3751 : "=r" (err), "=&r" (v), "=&r" (a) \
3752 : "0" (err), "1" (v), "2" (a)); \
3753 + pax_close_userland(); \
3754 if (err) \
3755 goto fault; \
3756 } while (0)
3757 diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
3758 index 447da6f..77a5057 100644
3759 --- a/arch/arm/mm/cache-l2x0.c
3760 +++ b/arch/arm/mm/cache-l2x0.c
3761 @@ -45,7 +45,7 @@ struct l2x0_of_data {
3762 void (*setup)(const struct device_node *, u32 *, u32 *);
3763 void (*save)(void);
3764 struct outer_cache_fns outer_cache;
3765 -};
3766 +} __do_const;
3767
3768 static bool of_init = false;
3769
3770 diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
3771 index 84e6f77..0b52f31 100644
3772 --- a/arch/arm/mm/context.c
3773 +++ b/arch/arm/mm/context.c
3774 @@ -43,7 +43,7 @@
3775 #define NUM_USER_ASIDS ASID_FIRST_VERSION
3776
3777 static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
3778 -static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3779 +static atomic64_unchecked_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
3780 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
3781
3782 static DEFINE_PER_CPU(atomic64_t, active_asids);
3783 @@ -180,7 +180,7 @@ static int is_reserved_asid(u64 asid)
3784 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3785 {
3786 u64 asid = atomic64_read(&mm->context.id);
3787 - u64 generation = atomic64_read(&asid_generation);
3788 + u64 generation = atomic64_read_unchecked(&asid_generation);
3789
3790 if (asid != 0 && is_reserved_asid(asid)) {
3791 /*
3792 @@ -198,7 +198,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
3793 */
3794 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3795 if (asid == NUM_USER_ASIDS) {
3796 - generation = atomic64_add_return(ASID_FIRST_VERSION,
3797 + generation = atomic64_add_return_unchecked(ASID_FIRST_VERSION,
3798 &asid_generation);
3799 flush_context(cpu);
3800 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
3801 @@ -227,14 +227,14 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
3802 cpu_set_reserved_ttbr0();
3803
3804 asid = atomic64_read(&mm->context.id);
3805 - if (!((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS)
3806 + if (!((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS)
3807 && atomic64_xchg(&per_cpu(active_asids, cpu), asid))
3808 goto switch_mm_fastpath;
3809
3810 raw_spin_lock_irqsave(&cpu_asid_lock, flags);
3811 /* Check that our ASID belongs to the current generation. */
3812 asid = atomic64_read(&mm->context.id);
3813 - if ((asid ^ atomic64_read(&asid_generation)) >> ASID_BITS) {
3814 + if ((asid ^ atomic64_read_unchecked(&asid_generation)) >> ASID_BITS) {
3815 asid = new_context(mm, cpu);
3816 atomic64_set(&mm->context.id, asid);
3817 }
3818 diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3819 index eb8830a..5360ce7 100644
3820 --- a/arch/arm/mm/fault.c
3821 +++ b/arch/arm/mm/fault.c
3822 @@ -25,6 +25,7 @@
3823 #include <asm/system_misc.h>
3824 #include <asm/system_info.h>
3825 #include <asm/tlbflush.h>
3826 +#include <asm/sections.h>
3827
3828 #include "fault.h"
3829
3830 @@ -138,6 +139,31 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3831 if (fixup_exception(regs))
3832 return;
3833
3834 +#ifdef CONFIG_PAX_MEMORY_UDEREF
3835 + if (addr < TASK_SIZE) {
3836 + if (current->signal->curr_ip)
3837 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3838 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3839 + else
3840 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3841 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3842 + }
3843 +#endif
3844 +
3845 +#ifdef CONFIG_PAX_KERNEXEC
3846 + if ((fsr & FSR_WRITE) &&
3847 + (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3848 + (MODULES_VADDR <= addr && addr < MODULES_END)))
3849 + {
3850 + if (current->signal->curr_ip)
3851 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3852 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3853 + else
3854 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3855 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3856 + }
3857 +#endif
3858 +
3859 /*
3860 * No handler, we'll have to terminate things with extreme prejudice.
3861 */
3862 @@ -174,6 +200,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3863 }
3864 #endif
3865
3866 +#ifdef CONFIG_PAX_PAGEEXEC
3867 + if (fsr & FSR_LNX_PF) {
3868 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3869 + do_group_exit(SIGKILL);
3870 + }
3871 +#endif
3872 +
3873 tsk->thread.address = addr;
3874 tsk->thread.error_code = fsr;
3875 tsk->thread.trap_no = 14;
3876 @@ -401,6 +434,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3877 }
3878 #endif /* CONFIG_MMU */
3879
3880 +#ifdef CONFIG_PAX_PAGEEXEC
3881 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3882 +{
3883 + long i;
3884 +
3885 + printk(KERN_ERR "PAX: bytes at PC: ");
3886 + for (i = 0; i < 20; i++) {
3887 + unsigned char c;
3888 + if (get_user(c, (__force unsigned char __user *)pc+i))
3889 + printk(KERN_CONT "?? ");
3890 + else
3891 + printk(KERN_CONT "%02x ", c);
3892 + }
3893 + printk("\n");
3894 +
3895 + printk(KERN_ERR "PAX: bytes at SP-4: ");
3896 + for (i = -1; i < 20; i++) {
3897 + unsigned long c;
3898 + if (get_user(c, (__force unsigned long __user *)sp+i))
3899 + printk(KERN_CONT "???????? ");
3900 + else
3901 + printk(KERN_CONT "%08lx ", c);
3902 + }
3903 + printk("\n");
3904 +}
3905 +#endif
3906 +
3907 /*
3908 * First Level Translation Fault Handler
3909 *
3910 @@ -548,9 +608,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3911 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3912 struct siginfo info;
3913
3914 +#ifdef CONFIG_PAX_MEMORY_UDEREF
3915 + if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3916 + if (current->signal->curr_ip)
3917 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3918 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3919 + else
3920 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3921 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3922 + goto die;
3923 + }
3924 +#endif
3925 +
3926 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3927 return;
3928
3929 +die:
3930 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3931 inf->name, fsr, addr);
3932
3933 @@ -574,15 +647,98 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
3934 ifsr_info[nr].name = name;
3935 }
3936
3937 +asmlinkage int sys_sigreturn(struct pt_regs *regs);
3938 +asmlinkage int sys_rt_sigreturn(struct pt_regs *regs);
3939 +
3940 asmlinkage void __exception
3941 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3942 {
3943 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3944 struct siginfo info;
3945 + unsigned long pc = instruction_pointer(regs);
3946 +
3947 + if (user_mode(regs)) {
3948 + unsigned long sigpage = current->mm->context.sigpage;
3949 +
3950 + if (sigpage <= pc && pc < sigpage + 7*4) {
3951 + if (pc < sigpage + 3*4)
3952 + sys_sigreturn(regs);
3953 + else
3954 + sys_rt_sigreturn(regs);
3955 + return;
3956 + }
3957 + if (pc == 0xffff0f60UL) {
3958 + /*
3959 + * PaX: __kuser_cmpxchg64 emulation
3960 + */
3961 + // TODO
3962 + //regs->ARM_pc = regs->ARM_lr;
3963 + //return;
3964 + }
3965 + if (pc == 0xffff0fa0UL) {
3966 + /*
3967 + * PaX: __kuser_memory_barrier emulation
3968 + */
3969 + // dmb(); implied by the exception
3970 + regs->ARM_pc = regs->ARM_lr;
3971 + return;
3972 + }
3973 + if (pc == 0xffff0fc0UL) {
3974 + /*
3975 + * PaX: __kuser_cmpxchg emulation
3976 + */
3977 + // TODO
3978 + //long new;
3979 + //int op;
3980 +
3981 + //op = FUTEX_OP_SET << 28;
3982 + //new = futex_atomic_op_inuser(op, regs->ARM_r2);
3983 + //regs->ARM_r0 = old != new;
3984 + //regs->ARM_pc = regs->ARM_lr;
3985 + //return;
3986 + }
3987 + if (pc == 0xffff0fe0UL) {
3988 + /*
3989 + * PaX: __kuser_get_tls emulation
3990 + */
3991 + regs->ARM_r0 = current_thread_info()->tp_value[0];
3992 + regs->ARM_pc = regs->ARM_lr;
3993 + return;
3994 + }
3995 + }
3996 +
3997 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3998 + else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
3999 + if (current->signal->curr_ip)
4000 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
4001 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4002 + pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4003 + else
4004 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
4005 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
4006 + pc >= TASK_SIZE ? "non-executable kernel" : "userland", pc);
4007 + goto die;
4008 + }
4009 +#endif
4010 +
4011 +#ifdef CONFIG_PAX_REFCOUNT
4012 + if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
4013 + unsigned int bkpt;
4014 +
4015 + if (!probe_kernel_address(pc, bkpt) && cpu_to_le32(bkpt) == 0xe12f1073) {
4016 + current->thread.error_code = ifsr;
4017 + current->thread.trap_no = 0;
4018 + pax_report_refcount_overflow(regs);
4019 + fixup_exception(regs);
4020 + return;
4021 + }
4022 + }
4023 +#endif
4024
4025 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
4026 return;
4027
4028 +die:
4029 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
4030 inf->name, ifsr, addr);
4031
4032 diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
4033 index cf08bdf..772656c 100644
4034 --- a/arch/arm/mm/fault.h
4035 +++ b/arch/arm/mm/fault.h
4036 @@ -3,6 +3,7 @@
4037
4038 /*
4039 * Fault status register encodings. We steal bit 31 for our own purposes.
4040 + * Set when the FSR value is from an instruction fault.
4041 */
4042 #define FSR_LNX_PF (1 << 31)
4043 #define FSR_WRITE (1 << 11)
4044 @@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
4045 }
4046 #endif
4047
4048 +/* valid for LPAE and !LPAE */
4049 +static inline int is_xn_fault(unsigned int fsr)
4050 +{
4051 + return ((fsr_fs(fsr) & 0x3c) == 0xc);
4052 +}
4053 +
4054 +static inline int is_domain_fault(unsigned int fsr)
4055 +{
4056 + return ((fsr_fs(fsr) & 0xD) == 0x9);
4057 +}
4058 +
4059 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
4060 unsigned long search_exception_table(unsigned long addr);
4061
4062 diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
4063 index 18ec4c5..479bb6a 100644
4064 --- a/arch/arm/mm/init.c
4065 +++ b/arch/arm/mm/init.c
4066 @@ -30,6 +30,8 @@
4067 #include <asm/setup.h>
4068 #include <asm/tlb.h>
4069 #include <asm/fixmap.h>
4070 +#include <asm/system_info.h>
4071 +#include <asm/cp15.h>
4072
4073 #include <asm/mach/arch.h>
4074 #include <asm/mach/map.h>
4075 @@ -684,7 +686,46 @@ void free_initmem(void)
4076 {
4077 #ifdef CONFIG_HAVE_TCM
4078 extern char __tcm_start, __tcm_end;
4079 +#endif
4080
4081 +#ifdef CONFIG_PAX_KERNEXEC
4082 + unsigned long addr;
4083 + pgd_t *pgd;
4084 + pud_t *pud;
4085 + pmd_t *pmd;
4086 + int cpu_arch = cpu_architecture();
4087 + unsigned int cr = get_cr();
4088 +
4089 + if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
4090 + /* make pages tables, etc before .text NX */
4091 + for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
4092 + pgd = pgd_offset_k(addr);
4093 + pud = pud_offset(pgd, addr);
4094 + pmd = pmd_offset(pud, addr);
4095 + __section_update(pmd, addr, PMD_SECT_XN);
4096 + }
4097 + /* make init NX */
4098 + for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
4099 + pgd = pgd_offset_k(addr);
4100 + pud = pud_offset(pgd, addr);
4101 + pmd = pmd_offset(pud, addr);
4102 + __section_update(pmd, addr, PMD_SECT_XN);
4103 + }
4104 + /* make kernel code/rodata RX */
4105 + for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
4106 + pgd = pgd_offset_k(addr);
4107 + pud = pud_offset(pgd, addr);
4108 + pmd = pmd_offset(pud, addr);
4109 +#ifdef CONFIG_ARM_LPAE
4110 + __section_update(pmd, addr, PMD_SECT_RDONLY);
4111 +#else
4112 + __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
4113 +#endif
4114 + }
4115 + }
4116 +#endif
4117 +
4118 +#ifdef CONFIG_HAVE_TCM
4119 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
4120 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
4121 #endif
4122 diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
4123 index f123d6e..04bf569 100644
4124 --- a/arch/arm/mm/ioremap.c
4125 +++ b/arch/arm/mm/ioremap.c
4126 @@ -392,9 +392,9 @@ __arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
4127 unsigned int mtype;
4128
4129 if (cached)
4130 - mtype = MT_MEMORY;
4131 + mtype = MT_MEMORY_RX;
4132 else
4133 - mtype = MT_MEMORY_NONCACHED;
4134 + mtype = MT_MEMORY_NONCACHED_RX;
4135
4136 return __arm_ioremap_caller(phys_addr, size, mtype,
4137 __builtin_return_address(0));
4138 diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
4139 index 304661d..53a6b19 100644
4140 --- a/arch/arm/mm/mmap.c
4141 +++ b/arch/arm/mm/mmap.c
4142 @@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4143 struct vm_area_struct *vma;
4144 int do_align = 0;
4145 int aliasing = cache_is_vipt_aliasing();
4146 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4147 struct vm_unmapped_area_info info;
4148
4149 /*
4150 @@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4151 if (len > TASK_SIZE)
4152 return -ENOMEM;
4153
4154 +#ifdef CONFIG_PAX_RANDMMAP
4155 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4156 +#endif
4157 +
4158 if (addr) {
4159 if (do_align)
4160 addr = COLOUR_ALIGN(addr, pgoff);
4161 @@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4162 addr = PAGE_ALIGN(addr);
4163
4164 vma = find_vma(mm, addr);
4165 - if (TASK_SIZE - len >= addr &&
4166 - (!vma || addr + len <= vma->vm_start))
4167 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4168 return addr;
4169 }
4170
4171 @@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
4172 info.high_limit = TASK_SIZE;
4173 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4174 info.align_offset = pgoff << PAGE_SHIFT;
4175 + info.threadstack_offset = offset;
4176 return vm_unmapped_area(&info);
4177 }
4178
4179 @@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4180 unsigned long addr = addr0;
4181 int do_align = 0;
4182 int aliasing = cache_is_vipt_aliasing();
4183 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4184 struct vm_unmapped_area_info info;
4185
4186 /*
4187 @@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4188 return addr;
4189 }
4190
4191 +#ifdef CONFIG_PAX_RANDMMAP
4192 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4193 +#endif
4194 +
4195 /* requesting a specific address */
4196 if (addr) {
4197 if (do_align)
4198 @@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4199 else
4200 addr = PAGE_ALIGN(addr);
4201 vma = find_vma(mm, addr);
4202 - if (TASK_SIZE - len >= addr &&
4203 - (!vma || addr + len <= vma->vm_start))
4204 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4205 return addr;
4206 }
4207
4208 @@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4209 info.high_limit = mm->mmap_base;
4210 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
4211 info.align_offset = pgoff << PAGE_SHIFT;
4212 + info.threadstack_offset = offset;
4213 addr = vm_unmapped_area(&info);
4214
4215 /*
4216 @@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4217 {
4218 unsigned long random_factor = 0UL;
4219
4220 +#ifdef CONFIG_PAX_RANDMMAP
4221 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4222 +#endif
4223 +
4224 /* 8 bits of randomness in 20 address space bits */
4225 if ((current->flags & PF_RANDOMIZE) &&
4226 !(current->personality & ADDR_NO_RANDOMIZE))
4227 @@ -180,9 +194,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4228
4229 if (mmap_is_legacy()) {
4230 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4231 +
4232 +#ifdef CONFIG_PAX_RANDMMAP
4233 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4234 + mm->mmap_base += mm->delta_mmap;
4235 +#endif
4236 +
4237 mm->get_unmapped_area = arch_get_unmapped_area;
4238 } else {
4239 mm->mmap_base = mmap_base(random_factor);
4240 +
4241 +#ifdef CONFIG_PAX_RANDMMAP
4242 + if (mm->pax_flags & MF_PAX_RANDMMAP)
4243 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4244 +#endif
4245 +
4246 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4247 }
4248 }
4249 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
4250 index b1d17ee..7a6f4d3 100644
4251 --- a/arch/arm/mm/mmu.c
4252 +++ b/arch/arm/mm/mmu.c
4253 @@ -36,6 +36,22 @@
4254 #include "mm.h"
4255 #include "tcm.h"
4256
4257 +#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
4258 +void modify_domain(unsigned int dom, unsigned int type)
4259 +{
4260 + struct thread_info *thread = current_thread_info();
4261 + unsigned int domain = thread->cpu_domain;
4262 + /*
4263 + * DOMAIN_MANAGER might be defined to some other value,
4264 + * use the arch-defined constant
4265 + */
4266 + domain &= ~domain_val(dom, 3);
4267 + thread->cpu_domain = domain | domain_val(dom, type);
4268 + set_domain(thread->cpu_domain);
4269 +}
4270 +EXPORT_SYMBOL(modify_domain);
4271 +#endif
4272 +
4273 /*
4274 * empty_zero_page is a special page that is used for
4275 * zero-initialized data and COW.
4276 @@ -228,10 +244,18 @@ __setup("noalign", noalign_setup);
4277
4278 #endif /* ifdef CONFIG_CPU_CP15 / else */
4279
4280 -#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
4281 +#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
4282 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4283
4284 -static struct mem_type mem_types[] = {
4285 +#ifdef CONFIG_PAX_KERNEXEC
4286 +#define L_PTE_KERNEXEC L_PTE_RDONLY
4287 +#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4288 +#else
4289 +#define L_PTE_KERNEXEC L_PTE_DIRTY
4290 +#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4291 +#endif
4292 +
4293 +static struct mem_type mem_types[] __read_only = {
4294 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4295 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4296 L_PTE_SHARED,
4297 @@ -260,16 +284,16 @@ static struct mem_type mem_types[] = {
4298 [MT_UNCACHED] = {
4299 .prot_pte = PROT_PTE_DEVICE,
4300 .prot_l1 = PMD_TYPE_TABLE,
4301 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4302 + .prot_sect = PROT_SECT_DEVICE,
4303 .domain = DOMAIN_IO,
4304 },
4305 [MT_CACHECLEAN] = {
4306 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4307 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4308 .domain = DOMAIN_KERNEL,
4309 },
4310 #ifndef CONFIG_ARM_LPAE
4311 [MT_MINICLEAN] = {
4312 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4313 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
4314 .domain = DOMAIN_KERNEL,
4315 },
4316 #endif
4317 @@ -277,36 +301,54 @@ static struct mem_type mem_types[] = {
4318 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4319 L_PTE_RDONLY,
4320 .prot_l1 = PMD_TYPE_TABLE,
4321 - .domain = DOMAIN_USER,
4322 + .domain = DOMAIN_VECTORS,
4323 },
4324 [MT_HIGH_VECTORS] = {
4325 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4326 L_PTE_USER | L_PTE_RDONLY,
4327 .prot_l1 = PMD_TYPE_TABLE,
4328 - .domain = DOMAIN_USER,
4329 + .domain = DOMAIN_VECTORS,
4330 },
4331 - [MT_MEMORY] = {
4332 + [MT_MEMORY_RWX] = {
4333 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4334 .prot_l1 = PMD_TYPE_TABLE,
4335 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4336 .domain = DOMAIN_KERNEL,
4337 },
4338 + [MT_MEMORY_RW] = {
4339 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4340 + .prot_l1 = PMD_TYPE_TABLE,
4341 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4342 + .domain = DOMAIN_KERNEL,
4343 + },
4344 + [MT_MEMORY_RX] = {
4345 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4346 + .prot_l1 = PMD_TYPE_TABLE,
4347 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4348 + .domain = DOMAIN_KERNEL,
4349 + },
4350 [MT_ROM] = {
4351 - .prot_sect = PMD_TYPE_SECT,
4352 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4353 .domain = DOMAIN_KERNEL,
4354 },
4355 - [MT_MEMORY_NONCACHED] = {
4356 + [MT_MEMORY_NONCACHED_RW] = {
4357 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4358 L_PTE_MT_BUFFERABLE,
4359 .prot_l1 = PMD_TYPE_TABLE,
4360 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4361 .domain = DOMAIN_KERNEL,
4362 },
4363 + [MT_MEMORY_NONCACHED_RX] = {
4364 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4365 + L_PTE_MT_BUFFERABLE,
4366 + .prot_l1 = PMD_TYPE_TABLE,
4367 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4368 + .domain = DOMAIN_KERNEL,
4369 + },
4370 [MT_MEMORY_DTCM] = {
4371 - .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4372 - L_PTE_XN,
4373 + .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4374 .prot_l1 = PMD_TYPE_TABLE,
4375 - .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4376 + .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4377 .domain = DOMAIN_KERNEL,
4378 },
4379 [MT_MEMORY_ITCM] = {
4380 @@ -316,10 +358,10 @@ static struct mem_type mem_types[] = {
4381 },
4382 [MT_MEMORY_SO] = {
4383 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4384 - L_PTE_MT_UNCACHED | L_PTE_XN,
4385 + L_PTE_MT_UNCACHED,
4386 .prot_l1 = PMD_TYPE_TABLE,
4387 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4388 - PMD_SECT_UNCACHED | PMD_SECT_XN,
4389 + PMD_SECT_UNCACHED,
4390 .domain = DOMAIN_KERNEL,
4391 },
4392 [MT_MEMORY_DMA_READY] = {
4393 @@ -405,9 +447,35 @@ static void __init build_mem_type_table(void)
4394 * to prevent speculative instruction fetches.
4395 */
4396 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4397 + mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4398 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4399 + mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4400 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4401 + mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4402 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4403 + mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4404 +
4405 + /* Mark other regions on ARMv6+ as execute-never */
4406 +
4407 +#ifdef CONFIG_PAX_KERNEXEC
4408 + mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4409 + mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4410 + mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4411 + mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4412 +#ifndef CONFIG_ARM_LPAE
4413 + mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4414 + mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4415 +#endif
4416 + mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4417 + mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4418 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4419 + mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= PMD_SECT_XN;
4420 + mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4421 + mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4422 +#endif
4423 +
4424 + mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4425 + mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4426 }
4427 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4428 /*
4429 @@ -468,6 +536,9 @@ static void __init build_mem_type_table(void)
4430 * from SVC mode and no access from userspace.
4431 */
4432 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4433 +#ifdef CONFIG_PAX_KERNEXEC
4434 + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4435 +#endif
4436 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4437 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4438 #endif
4439 @@ -485,11 +556,17 @@ static void __init build_mem_type_table(void)
4440 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4441 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4442 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4443 - mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4444 - mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4445 + mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4446 + mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4447 + mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4448 + mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4449 + mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4450 + mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4451 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4452 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4453 - mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4454 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4455 + mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4456 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4457 + mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4458 }
4459 }
4460
4461 @@ -500,15 +577,20 @@ static void __init build_mem_type_table(void)
4462 if (cpu_arch >= CPU_ARCH_ARMv6) {
4463 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4464 /* Non-cacheable Normal is XCB = 001 */
4465 - mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4466 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4467 + PMD_SECT_BUFFERED;
4468 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4469 PMD_SECT_BUFFERED;
4470 } else {
4471 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4472 - mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4473 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4474 + PMD_SECT_TEX(1);
4475 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4476 PMD_SECT_TEX(1);
4477 }
4478 } else {
4479 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4480 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4481 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4482 }
4483
4484 #ifdef CONFIG_ARM_LPAE
4485 @@ -524,6 +606,8 @@ static void __init build_mem_type_table(void)
4486 vecs_pgprot |= PTE_EXT_AF;
4487 #endif
4488
4489 + user_pgprot |= __supported_pte_mask;
4490 +
4491 for (i = 0; i < 16; i++) {
4492 pteval_t v = pgprot_val(protection_map[i]);
4493 protection_map[i] = __pgprot(v | user_pgprot);
4494 @@ -541,10 +625,15 @@ static void __init build_mem_type_table(void)
4495
4496 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4497 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4498 - mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4499 - mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4500 + mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4501 + mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4502 + mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4503 + mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4504 + mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4505 + mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4506 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4507 - mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4508 + mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4509 + mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4510 mem_types[MT_ROM].prot_sect |= cp->pmd;
4511
4512 switch (cp->pmd) {
4513 @@ -1186,18 +1275,15 @@ void __init arm_mm_memblock_reserve(void)
4514 * called function. This means you can't use any function or debugging
4515 * method which may touch any device, otherwise the kernel _will_ crash.
4516 */
4517 +
4518 +static char vectors[PAGE_SIZE * 2] __read_only __aligned(PAGE_SIZE);
4519 +
4520 static void __init devicemaps_init(const struct machine_desc *mdesc)
4521 {
4522 struct map_desc map;
4523 unsigned long addr;
4524 - void *vectors;
4525
4526 - /*
4527 - * Allocate the vector page early.
4528 - */
4529 - vectors = early_alloc(PAGE_SIZE * 2);
4530 -
4531 - early_trap_init(vectors);
4532 + early_trap_init(&vectors);
4533
4534 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4535 pmd_clear(pmd_off_k(addr));
4536 @@ -1237,7 +1323,7 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
4537 * location (0xffff0000). If we aren't using high-vectors, also
4538 * create a mapping at the low-vectors virtual address.
4539 */
4540 - map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4541 + map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4542 map.virtual = 0xffff0000;
4543 map.length = PAGE_SIZE;
4544 #ifdef CONFIG_KUSER_HELPERS
4545 @@ -1309,8 +1395,39 @@ static void __init map_lowmem(void)
4546 map.pfn = __phys_to_pfn(start);
4547 map.virtual = __phys_to_virt(start);
4548 map.length = end - start;
4549 - map.type = MT_MEMORY;
4550
4551 +#ifdef CONFIG_PAX_KERNEXEC
4552 + if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4553 + struct map_desc kernel;
4554 + struct map_desc initmap;
4555 +
4556 + /* when freeing initmem we will make this RW */
4557 + initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4558 + initmap.virtual = (unsigned long)__init_begin;
4559 + initmap.length = _sdata - __init_begin;
4560 + initmap.type = MT_MEMORY_RWX;
4561 + create_mapping(&initmap);
4562 +
4563 + /* when freeing initmem we will make this RX */
4564 + kernel.pfn = __phys_to_pfn(__pa(_stext));
4565 + kernel.virtual = (unsigned long)_stext;
4566 + kernel.length = __init_begin - _stext;
4567 + kernel.type = MT_MEMORY_RWX;
4568 + create_mapping(&kernel);
4569 +
4570 + if (map.virtual < (unsigned long)_stext) {
4571 + map.length = (unsigned long)_stext - map.virtual;
4572 + map.type = MT_MEMORY_RWX;
4573 + create_mapping(&map);
4574 + }
4575 +
4576 + map.pfn = __phys_to_pfn(__pa(_sdata));
4577 + map.virtual = (unsigned long)_sdata;
4578 + map.length = end - __pa(_sdata);
4579 + }
4580 +#endif
4581 +
4582 + map.type = MT_MEMORY_RW;
4583 create_mapping(&map);
4584 }
4585 }
4586 diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4587 index a5bc92d..0bb4730 100644
4588 --- a/arch/arm/plat-omap/sram.c
4589 +++ b/arch/arm/plat-omap/sram.c
4590 @@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4591 * Looks like we need to preserve some bootloader code at the
4592 * beginning of SRAM for jumping to flash for reboot to work...
4593 */
4594 + pax_open_kernel();
4595 memset_io(omap_sram_base + omap_sram_skip, 0,
4596 omap_sram_size - omap_sram_skip);
4597 + pax_close_kernel();
4598 }
4599 diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4600 index ce6d763..cfea917 100644
4601 --- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4602 +++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4603 @@ -47,7 +47,7 @@ struct samsung_dma_ops {
4604 int (*started)(unsigned ch);
4605 int (*flush)(unsigned ch);
4606 int (*stop)(unsigned ch);
4607 -};
4608 +} __no_const;
4609
4610 extern void *samsung_dmadev_get_ops(void);
4611 extern void *s3c_dma_get_ops(void);
4612 diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4613 index c3a58a1..78fbf54 100644
4614 --- a/arch/avr32/include/asm/cache.h
4615 +++ b/arch/avr32/include/asm/cache.h
4616 @@ -1,8 +1,10 @@
4617 #ifndef __ASM_AVR32_CACHE_H
4618 #define __ASM_AVR32_CACHE_H
4619
4620 +#include <linux/const.h>
4621 +
4622 #define L1_CACHE_SHIFT 5
4623 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4624 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4625
4626 /*
4627 * Memory returned by kmalloc() may be used for DMA, so we must make
4628 diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4629 index d232888..87c8df1 100644
4630 --- a/arch/avr32/include/asm/elf.h
4631 +++ b/arch/avr32/include/asm/elf.h
4632 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4633 the loader. We need to make sure that it is out of the way of the program
4634 that it will "exec", and that there is sufficient room for the brk. */
4635
4636 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4637 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4638
4639 +#ifdef CONFIG_PAX_ASLR
4640 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4641 +
4642 +#define PAX_DELTA_MMAP_LEN 15
4643 +#define PAX_DELTA_STACK_LEN 15
4644 +#endif
4645
4646 /* This yields a mask that user programs can use to figure out what
4647 instruction set this CPU supports. This could be done in user space,
4648 diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4649 index 479330b..53717a8 100644
4650 --- a/arch/avr32/include/asm/kmap_types.h
4651 +++ b/arch/avr32/include/asm/kmap_types.h
4652 @@ -2,9 +2,9 @@
4653 #define __ASM_AVR32_KMAP_TYPES_H
4654
4655 #ifdef CONFIG_DEBUG_HIGHMEM
4656 -# define KM_TYPE_NR 29
4657 +# define KM_TYPE_NR 30
4658 #else
4659 -# define KM_TYPE_NR 14
4660 +# define KM_TYPE_NR 15
4661 #endif
4662
4663 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4664 diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4665 index 0eca933..eb78c7b 100644
4666 --- a/arch/avr32/mm/fault.c
4667 +++ b/arch/avr32/mm/fault.c
4668 @@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4669
4670 int exception_trace = 1;
4671
4672 +#ifdef CONFIG_PAX_PAGEEXEC
4673 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4674 +{
4675 + unsigned long i;
4676 +
4677 + printk(KERN_ERR "PAX: bytes at PC: ");
4678 + for (i = 0; i < 20; i++) {
4679 + unsigned char c;
4680 + if (get_user(c, (unsigned char *)pc+i))
4681 + printk(KERN_CONT "???????? ");
4682 + else
4683 + printk(KERN_CONT "%02x ", c);
4684 + }
4685 + printk("\n");
4686 +}
4687 +#endif
4688 +
4689 /*
4690 * This routine handles page faults. It determines the address and the
4691 * problem, and then passes it off to one of the appropriate routines.
4692 @@ -176,6 +193,16 @@ bad_area:
4693 up_read(&mm->mmap_sem);
4694
4695 if (user_mode(regs)) {
4696 +
4697 +#ifdef CONFIG_PAX_PAGEEXEC
4698 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4699 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4700 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4701 + do_group_exit(SIGKILL);
4702 + }
4703 + }
4704 +#endif
4705 +
4706 if (exception_trace && printk_ratelimit())
4707 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4708 "sp %08lx ecr %lu\n",
4709 diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4710 index 568885a..f8008df 100644
4711 --- a/arch/blackfin/include/asm/cache.h
4712 +++ b/arch/blackfin/include/asm/cache.h
4713 @@ -7,6 +7,7 @@
4714 #ifndef __ARCH_BLACKFIN_CACHE_H
4715 #define __ARCH_BLACKFIN_CACHE_H
4716
4717 +#include <linux/const.h>
4718 #include <linux/linkage.h> /* for asmlinkage */
4719
4720 /*
4721 @@ -14,7 +15,7 @@
4722 * Blackfin loads 32 bytes for cache
4723 */
4724 #define L1_CACHE_SHIFT 5
4725 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4726 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4727 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4728
4729 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4730 diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4731 index aea2718..3639a60 100644
4732 --- a/arch/cris/include/arch-v10/arch/cache.h
4733 +++ b/arch/cris/include/arch-v10/arch/cache.h
4734 @@ -1,8 +1,9 @@
4735 #ifndef _ASM_ARCH_CACHE_H
4736 #define _ASM_ARCH_CACHE_H
4737
4738 +#include <linux/const.h>
4739 /* Etrax 100LX have 32-byte cache-lines. */
4740 -#define L1_CACHE_BYTES 32
4741 #define L1_CACHE_SHIFT 5
4742 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4743
4744 #endif /* _ASM_ARCH_CACHE_H */
4745 diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4746 index 7caf25d..ee65ac5 100644
4747 --- a/arch/cris/include/arch-v32/arch/cache.h
4748 +++ b/arch/cris/include/arch-v32/arch/cache.h
4749 @@ -1,11 +1,12 @@
4750 #ifndef _ASM_CRIS_ARCH_CACHE_H
4751 #define _ASM_CRIS_ARCH_CACHE_H
4752
4753 +#include <linux/const.h>
4754 #include <arch/hwregs/dma.h>
4755
4756 /* A cache-line is 32 bytes. */
4757 -#define L1_CACHE_BYTES 32
4758 #define L1_CACHE_SHIFT 5
4759 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4760
4761 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4762
4763 diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4764 index b86329d..6709906 100644
4765 --- a/arch/frv/include/asm/atomic.h
4766 +++ b/arch/frv/include/asm/atomic.h
4767 @@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4768 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4769 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4770
4771 +#define atomic64_read_unchecked(v) atomic64_read(v)
4772 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4773 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4774 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4775 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4776 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4777 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4778 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4779 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4780 +
4781 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4782 {
4783 int c, old;
4784 diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4785 index 2797163..c2a401d 100644
4786 --- a/arch/frv/include/asm/cache.h
4787 +++ b/arch/frv/include/asm/cache.h
4788 @@ -12,10 +12,11 @@
4789 #ifndef __ASM_CACHE_H
4790 #define __ASM_CACHE_H
4791
4792 +#include <linux/const.h>
4793
4794 /* bytes per L1 cache line */
4795 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4796 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4797 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4798
4799 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4800 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4801 diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4802 index 43901f2..0d8b865 100644
4803 --- a/arch/frv/include/asm/kmap_types.h
4804 +++ b/arch/frv/include/asm/kmap_types.h
4805 @@ -2,6 +2,6 @@
4806 #ifndef _ASM_KMAP_TYPES_H
4807 #define _ASM_KMAP_TYPES_H
4808
4809 -#define KM_TYPE_NR 17
4810 +#define KM_TYPE_NR 18
4811
4812 #endif
4813 diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4814 index 836f147..4cf23f5 100644
4815 --- a/arch/frv/mm/elf-fdpic.c
4816 +++ b/arch/frv/mm/elf-fdpic.c
4817 @@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4818 {
4819 struct vm_area_struct *vma;
4820 struct vm_unmapped_area_info info;
4821 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4822
4823 if (len > TASK_SIZE)
4824 return -ENOMEM;
4825 @@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4826 if (addr) {
4827 addr = PAGE_ALIGN(addr);
4828 vma = find_vma(current->mm, addr);
4829 - if (TASK_SIZE - len >= addr &&
4830 - (!vma || addr + len <= vma->vm_start))
4831 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4832 goto success;
4833 }
4834
4835 @@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4836 info.high_limit = (current->mm->start_stack - 0x00200000);
4837 info.align_mask = 0;
4838 info.align_offset = 0;
4839 + info.threadstack_offset = offset;
4840 addr = vm_unmapped_area(&info);
4841 if (!(addr & ~PAGE_MASK))
4842 goto success;
4843 diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4844 index f4ca594..adc72fd6 100644
4845 --- a/arch/hexagon/include/asm/cache.h
4846 +++ b/arch/hexagon/include/asm/cache.h
4847 @@ -21,9 +21,11 @@
4848 #ifndef __ASM_CACHE_H
4849 #define __ASM_CACHE_H
4850
4851 +#include <linux/const.h>
4852 +
4853 /* Bytes per L1 cache line */
4854 -#define L1_CACHE_SHIFT (5)
4855 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4856 +#define L1_CACHE_SHIFT 5
4857 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4858
4859 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4860 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4861 diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
4862 index 7740ab1..17fa8c5 100644
4863 --- a/arch/ia64/Kconfig
4864 +++ b/arch/ia64/Kconfig
4865 @@ -554,6 +554,7 @@ source "drivers/sn/Kconfig"
4866 config KEXEC
4867 bool "kexec system call"
4868 depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
4869 + depends on !GRKERNSEC_KMEM
4870 help
4871 kexec is a system call that implements the ability to shutdown your
4872 current kernel, and to start another kernel. It is like a reboot
4873 diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4874 index 6e6fe18..a6ae668 100644
4875 --- a/arch/ia64/include/asm/atomic.h
4876 +++ b/arch/ia64/include/asm/atomic.h
4877 @@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4878 #define atomic64_inc(v) atomic64_add(1, (v))
4879 #define atomic64_dec(v) atomic64_sub(1, (v))
4880
4881 +#define atomic64_read_unchecked(v) atomic64_read(v)
4882 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4883 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4884 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4885 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4886 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
4887 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4888 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
4889 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4890 +
4891 /* Atomic operations are already serializing */
4892 #define smp_mb__before_atomic_dec() barrier()
4893 #define smp_mb__after_atomic_dec() barrier()
4894 diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4895 index 988254a..e1ee885 100644
4896 --- a/arch/ia64/include/asm/cache.h
4897 +++ b/arch/ia64/include/asm/cache.h
4898 @@ -1,6 +1,7 @@
4899 #ifndef _ASM_IA64_CACHE_H
4900 #define _ASM_IA64_CACHE_H
4901
4902 +#include <linux/const.h>
4903
4904 /*
4905 * Copyright (C) 1998-2000 Hewlett-Packard Co
4906 @@ -9,7 +10,7 @@
4907
4908 /* Bytes per L1 (data) cache line. */
4909 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4910 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4911 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4912
4913 #ifdef CONFIG_SMP
4914 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4915 diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4916 index 5a83c5c..4d7f553 100644
4917 --- a/arch/ia64/include/asm/elf.h
4918 +++ b/arch/ia64/include/asm/elf.h
4919 @@ -42,6 +42,13 @@
4920 */
4921 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4922
4923 +#ifdef CONFIG_PAX_ASLR
4924 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4925 +
4926 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4927 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4928 +#endif
4929 +
4930 #define PT_IA_64_UNWIND 0x70000001
4931
4932 /* IA-64 relocations: */
4933 diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4934 index 96a8d92..617a1cf 100644
4935 --- a/arch/ia64/include/asm/pgalloc.h
4936 +++ b/arch/ia64/include/asm/pgalloc.h
4937 @@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4938 pgd_val(*pgd_entry) = __pa(pud);
4939 }
4940
4941 +static inline void
4942 +pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4943 +{
4944 + pgd_populate(mm, pgd_entry, pud);
4945 +}
4946 +
4947 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4948 {
4949 return quicklist_alloc(0, GFP_KERNEL, NULL);
4950 @@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4951 pud_val(*pud_entry) = __pa(pmd);
4952 }
4953
4954 +static inline void
4955 +pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4956 +{
4957 + pud_populate(mm, pud_entry, pmd);
4958 +}
4959 +
4960 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4961 {
4962 return quicklist_alloc(0, GFP_KERNEL, NULL);
4963 diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4964 index 7935115..c0eca6a 100644
4965 --- a/arch/ia64/include/asm/pgtable.h
4966 +++ b/arch/ia64/include/asm/pgtable.h
4967 @@ -12,7 +12,7 @@
4968 * David Mosberger-Tang <davidm@hpl.hp.com>
4969 */
4970
4971 -
4972 +#include <linux/const.h>
4973 #include <asm/mman.h>
4974 #include <asm/page.h>
4975 #include <asm/processor.h>
4976 @@ -142,6 +142,17 @@
4977 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4978 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4979 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4980 +
4981 +#ifdef CONFIG_PAX_PAGEEXEC
4982 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4983 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4984 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4985 +#else
4986 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
4987 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
4988 +# define PAGE_COPY_NOEXEC PAGE_COPY
4989 +#endif
4990 +
4991 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
4992 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
4993 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
4994 diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
4995 index 45698cd..e8e2dbc 100644
4996 --- a/arch/ia64/include/asm/spinlock.h
4997 +++ b/arch/ia64/include/asm/spinlock.h
4998 @@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
4999 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
5000
5001 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
5002 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
5003 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
5004 }
5005
5006 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
5007 diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
5008 index 449c8c0..18965fb 100644
5009 --- a/arch/ia64/include/asm/uaccess.h
5010 +++ b/arch/ia64/include/asm/uaccess.h
5011 @@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
5012 static inline unsigned long
5013 __copy_to_user (void __user *to, const void *from, unsigned long count)
5014 {
5015 + if (count > INT_MAX)
5016 + return count;
5017 +
5018 + if (!__builtin_constant_p(count))
5019 + check_object_size(from, count, true);
5020 +
5021 return __copy_user(to, (__force void __user *) from, count);
5022 }
5023
5024 static inline unsigned long
5025 __copy_from_user (void *to, const void __user *from, unsigned long count)
5026 {
5027 + if (count > INT_MAX)
5028 + return count;
5029 +
5030 + if (!__builtin_constant_p(count))
5031 + check_object_size(to, count, false);
5032 +
5033 return __copy_user((__force void __user *) to, from, count);
5034 }
5035
5036 @@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5037 ({ \
5038 void __user *__cu_to = (to); \
5039 const void *__cu_from = (from); \
5040 - long __cu_len = (n); \
5041 + unsigned long __cu_len = (n); \
5042 \
5043 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
5044 + if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
5045 + if (!__builtin_constant_p(n)) \
5046 + check_object_size(__cu_from, __cu_len, true); \
5047 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
5048 + } \
5049 __cu_len; \
5050 })
5051
5052 @@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
5053 ({ \
5054 void *__cu_to = (to); \
5055 const void __user *__cu_from = (from); \
5056 - long __cu_len = (n); \
5057 + unsigned long __cu_len = (n); \
5058 \
5059 __chk_user_ptr(__cu_from); \
5060 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
5061 + if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
5062 + if (!__builtin_constant_p(n)) \
5063 + check_object_size(__cu_to, __cu_len, false); \
5064 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
5065 + } \
5066 __cu_len; \
5067 })
5068
5069 diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
5070 index 24603be..948052d 100644
5071 --- a/arch/ia64/kernel/module.c
5072 +++ b/arch/ia64/kernel/module.c
5073 @@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
5074 void
5075 module_free (struct module *mod, void *module_region)
5076 {
5077 - if (mod && mod->arch.init_unw_table &&
5078 - module_region == mod->module_init) {
5079 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
5080 unw_remove_unwind_table(mod->arch.init_unw_table);
5081 mod->arch.init_unw_table = NULL;
5082 }
5083 @@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
5084 }
5085
5086 static inline int
5087 +in_init_rx (const struct module *mod, uint64_t addr)
5088 +{
5089 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
5090 +}
5091 +
5092 +static inline int
5093 +in_init_rw (const struct module *mod, uint64_t addr)
5094 +{
5095 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
5096 +}
5097 +
5098 +static inline int
5099 in_init (const struct module *mod, uint64_t addr)
5100 {
5101 - return addr - (uint64_t) mod->module_init < mod->init_size;
5102 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
5103 +}
5104 +
5105 +static inline int
5106 +in_core_rx (const struct module *mod, uint64_t addr)
5107 +{
5108 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
5109 +}
5110 +
5111 +static inline int
5112 +in_core_rw (const struct module *mod, uint64_t addr)
5113 +{
5114 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
5115 }
5116
5117 static inline int
5118 in_core (const struct module *mod, uint64_t addr)
5119 {
5120 - return addr - (uint64_t) mod->module_core < mod->core_size;
5121 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
5122 }
5123
5124 static inline int
5125 @@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
5126 break;
5127
5128 case RV_BDREL:
5129 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
5130 + if (in_init_rx(mod, val))
5131 + val -= (uint64_t) mod->module_init_rx;
5132 + else if (in_init_rw(mod, val))
5133 + val -= (uint64_t) mod->module_init_rw;
5134 + else if (in_core_rx(mod, val))
5135 + val -= (uint64_t) mod->module_core_rx;
5136 + else if (in_core_rw(mod, val))
5137 + val -= (uint64_t) mod->module_core_rw;
5138 break;
5139
5140 case RV_LTV:
5141 @@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
5142 * addresses have been selected...
5143 */
5144 uint64_t gp;
5145 - if (mod->core_size > MAX_LTOFF)
5146 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
5147 /*
5148 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
5149 * at the end of the module.
5150 */
5151 - gp = mod->core_size - MAX_LTOFF / 2;
5152 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
5153 else
5154 - gp = mod->core_size / 2;
5155 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
5156 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
5157 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
5158 mod->arch.gp = gp;
5159 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
5160 }
5161 diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
5162 index ab33328..f39506c 100644
5163 --- a/arch/ia64/kernel/palinfo.c
5164 +++ b/arch/ia64/kernel/palinfo.c
5165 @@ -980,7 +980,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
5166 return NOTIFY_OK;
5167 }
5168
5169 -static struct notifier_block __refdata palinfo_cpu_notifier =
5170 +static struct notifier_block palinfo_cpu_notifier =
5171 {
5172 .notifier_call = palinfo_cpu_callback,
5173 .priority = 0,
5174 diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
5175 index 41e33f8..65180b2a 100644
5176 --- a/arch/ia64/kernel/sys_ia64.c
5177 +++ b/arch/ia64/kernel/sys_ia64.c
5178 @@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5179 unsigned long align_mask = 0;
5180 struct mm_struct *mm = current->mm;
5181 struct vm_unmapped_area_info info;
5182 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5183
5184 if (len > RGN_MAP_LIMIT)
5185 return -ENOMEM;
5186 @@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5187 if (REGION_NUMBER(addr) == RGN_HPAGE)
5188 addr = 0;
5189 #endif
5190 +
5191 +#ifdef CONFIG_PAX_RANDMMAP
5192 + if (mm->pax_flags & MF_PAX_RANDMMAP)
5193 + addr = mm->free_area_cache;
5194 + else
5195 +#endif
5196 +
5197 if (!addr)
5198 addr = TASK_UNMAPPED_BASE;
5199
5200 @@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5201 info.high_limit = TASK_SIZE;
5202 info.align_mask = align_mask;
5203 info.align_offset = 0;
5204 + info.threadstack_offset = offset;
5205 return vm_unmapped_area(&info);
5206 }
5207
5208 diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5209 index 0ccb28f..8992469 100644
5210 --- a/arch/ia64/kernel/vmlinux.lds.S
5211 +++ b/arch/ia64/kernel/vmlinux.lds.S
5212 @@ -198,7 +198,7 @@ SECTIONS {
5213 /* Per-cpu data: */
5214 . = ALIGN(PERCPU_PAGE_SIZE);
5215 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5216 - __phys_per_cpu_start = __per_cpu_load;
5217 + __phys_per_cpu_start = per_cpu_load;
5218 /*
5219 * ensure percpu data fits
5220 * into percpu page size
5221 diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5222 index 7225dad..2a7c8256 100644
5223 --- a/arch/ia64/mm/fault.c
5224 +++ b/arch/ia64/mm/fault.c
5225 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5226 return pte_present(pte);
5227 }
5228
5229 +#ifdef CONFIG_PAX_PAGEEXEC
5230 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5231 +{
5232 + unsigned long i;
5233 +
5234 + printk(KERN_ERR "PAX: bytes at PC: ");
5235 + for (i = 0; i < 8; i++) {
5236 + unsigned int c;
5237 + if (get_user(c, (unsigned int *)pc+i))
5238 + printk(KERN_CONT "???????? ");
5239 + else
5240 + printk(KERN_CONT "%08x ", c);
5241 + }
5242 + printk("\n");
5243 +}
5244 +#endif
5245 +
5246 # define VM_READ_BIT 0
5247 # define VM_WRITE_BIT 1
5248 # define VM_EXEC_BIT 2
5249 @@ -151,8 +168,21 @@ retry:
5250 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5251 goto bad_area;
5252
5253 - if ((vma->vm_flags & mask) != mask)
5254 + if ((vma->vm_flags & mask) != mask) {
5255 +
5256 +#ifdef CONFIG_PAX_PAGEEXEC
5257 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5258 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5259 + goto bad_area;
5260 +
5261 + up_read(&mm->mmap_sem);
5262 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5263 + do_group_exit(SIGKILL);
5264 + }
5265 +#endif
5266 +
5267 goto bad_area;
5268 + }
5269
5270 /*
5271 * If for any reason at all we couldn't handle the fault, make
5272 diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5273 index 68232db..6ca80af 100644
5274 --- a/arch/ia64/mm/hugetlbpage.c
5275 +++ b/arch/ia64/mm/hugetlbpage.c
5276 @@ -154,6 +154,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5277 unsigned long pgoff, unsigned long flags)
5278 {
5279 struct vm_unmapped_area_info info;
5280 + unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5281
5282 if (len > RGN_MAP_LIMIT)
5283 return -ENOMEM;
5284 @@ -177,6 +178,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5285 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5286 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5287 info.align_offset = 0;
5288 + info.threadstack_offset = offset;
5289 return vm_unmapped_area(&info);
5290 }
5291
5292 diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5293 index b6f7f43..c04320d 100644
5294 --- a/arch/ia64/mm/init.c
5295 +++ b/arch/ia64/mm/init.c
5296 @@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5297 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5298 vma->vm_end = vma->vm_start + PAGE_SIZE;
5299 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5300 +
5301 +#ifdef CONFIG_PAX_PAGEEXEC
5302 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5303 + vma->vm_flags &= ~VM_EXEC;
5304 +
5305 +#ifdef CONFIG_PAX_MPROTECT
5306 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
5307 + vma->vm_flags &= ~VM_MAYEXEC;
5308 +#endif
5309 +
5310 + }
5311 +#endif
5312 +
5313 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5314 down_write(&current->mm->mmap_sem);
5315 if (insert_vm_struct(current->mm, vma)) {
5316 diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5317 index 40b3ee9..8c2c112 100644
5318 --- a/arch/m32r/include/asm/cache.h
5319 +++ b/arch/m32r/include/asm/cache.h
5320 @@ -1,8 +1,10 @@
5321 #ifndef _ASM_M32R_CACHE_H
5322 #define _ASM_M32R_CACHE_H
5323
5324 +#include <linux/const.h>
5325 +
5326 /* L1 cache line size */
5327 #define L1_CACHE_SHIFT 4
5328 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5329 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5330
5331 #endif /* _ASM_M32R_CACHE_H */
5332 diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5333 index 82abd15..d95ae5d 100644
5334 --- a/arch/m32r/lib/usercopy.c
5335 +++ b/arch/m32r/lib/usercopy.c
5336 @@ -14,6 +14,9 @@
5337 unsigned long
5338 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5339 {
5340 + if ((long)n < 0)
5341 + return n;
5342 +
5343 prefetch(from);
5344 if (access_ok(VERIFY_WRITE, to, n))
5345 __copy_user(to,from,n);
5346 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5347 unsigned long
5348 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5349 {
5350 + if ((long)n < 0)
5351 + return n;
5352 +
5353 prefetchw(to);
5354 if (access_ok(VERIFY_READ, from, n))
5355 __copy_user_zeroing(to,from,n);
5356 diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5357 index 0395c51..5f26031 100644
5358 --- a/arch/m68k/include/asm/cache.h
5359 +++ b/arch/m68k/include/asm/cache.h
5360 @@ -4,9 +4,11 @@
5361 #ifndef __ARCH_M68K_CACHE_H
5362 #define __ARCH_M68K_CACHE_H
5363
5364 +#include <linux/const.h>
5365 +
5366 /* bytes per L1 cache line */
5367 #define L1_CACHE_SHIFT 4
5368 -#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5369 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5370
5371 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5372
5373 diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5374 index 0424315..defcca9 100644
5375 --- a/arch/metag/mm/hugetlbpage.c
5376 +++ b/arch/metag/mm/hugetlbpage.c
5377 @@ -205,6 +205,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5378 info.high_limit = TASK_SIZE;
5379 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5380 info.align_offset = 0;
5381 + info.threadstack_offset = 0;
5382 return vm_unmapped_area(&info);
5383 }
5384
5385 diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5386 index 4efe96a..60e8699 100644
5387 --- a/arch/microblaze/include/asm/cache.h
5388 +++ b/arch/microblaze/include/asm/cache.h
5389 @@ -13,11 +13,12 @@
5390 #ifndef _ASM_MICROBLAZE_CACHE_H
5391 #define _ASM_MICROBLAZE_CACHE_H
5392
5393 +#include <linux/const.h>
5394 #include <asm/registers.h>
5395
5396 #define L1_CACHE_SHIFT 5
5397 /* word-granular cache in microblaze */
5398 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5399 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5400
5401 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5402
5403 diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
5404 index f75ab4a..adc6968 100644
5405 --- a/arch/mips/Kconfig
5406 +++ b/arch/mips/Kconfig
5407 @@ -2283,6 +2283,7 @@ source "kernel/Kconfig.preempt"
5408
5409 config KEXEC
5410 bool "Kexec system call"
5411 + depends on !GRKERNSEC_KMEM
5412 help
5413 kexec is a system call that implements the ability to shutdown your
5414 current kernel, and to start another kernel. It is like a reboot
5415 diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5416 index 08b6079..8b554d2 100644
5417 --- a/arch/mips/include/asm/atomic.h
5418 +++ b/arch/mips/include/asm/atomic.h
5419 @@ -21,15 +21,39 @@
5420 #include <asm/cmpxchg.h>
5421 #include <asm/war.h>
5422
5423 +#ifdef CONFIG_GENERIC_ATOMIC64
5424 +#include <asm-generic/atomic64.h>
5425 +#endif
5426 +
5427 #define ATOMIC_INIT(i) { (i) }
5428
5429 +#ifdef CONFIG_64BIT
5430 +#define _ASM_EXTABLE(from, to) \
5431 +" .section __ex_table,\"a\"\n" \
5432 +" .dword " #from ", " #to"\n" \
5433 +" .previous\n"
5434 +#else
5435 +#define _ASM_EXTABLE(from, to) \
5436 +" .section __ex_table,\"a\"\n" \
5437 +" .word " #from ", " #to"\n" \
5438 +" .previous\n"
5439 +#endif
5440 +
5441 /*
5442 * atomic_read - read atomic variable
5443 * @v: pointer of type atomic_t
5444 *
5445 * Atomically reads the value of @v.
5446 */
5447 -#define atomic_read(v) (*(volatile int *)&(v)->counter)
5448 +static inline int atomic_read(const atomic_t *v)
5449 +{
5450 + return (*(volatile const int *) &v->counter);
5451 +}
5452 +
5453 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
5454 +{
5455 + return (*(volatile const int *) &v->counter);
5456 +}
5457
5458 /*
5459 * atomic_set - set atomic variable
5460 @@ -38,7 +62,15 @@
5461 *
5462 * Atomically sets the value of @v to @i.
5463 */
5464 -#define atomic_set(v, i) ((v)->counter = (i))
5465 +static inline void atomic_set(atomic_t *v, int i)
5466 +{
5467 + v->counter = i;
5468 +}
5469 +
5470 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
5471 +{
5472 + v->counter = i;
5473 +}
5474
5475 /*
5476 * atomic_add - add integer to atomic variable
5477 @@ -47,7 +79,67 @@
5478 *
5479 * Atomically adds @i to @v.
5480 */
5481 -static __inline__ void atomic_add(int i, atomic_t * v)
5482 +static __inline__ void atomic_add(int i, atomic_t *v)
5483 +{
5484 + int temp;
5485 +
5486 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
5487 + __asm__ __volatile__(
5488 + " .set mips3 \n"
5489 + "1: ll %0, %1 # atomic_add \n"
5490 +#ifdef CONFIG_PAX_REFCOUNT
5491 + /* Exception on overflow. */
5492 + "2: add %0, %2 \n"
5493 +#else
5494 + " addu %0, %2 \n"
5495 +#endif
5496 + " sc %0, %1 \n"
5497 + " beqzl %0, 1b \n"
5498 +#ifdef CONFIG_PAX_REFCOUNT
5499 + "3: \n"
5500 + _ASM_EXTABLE(2b, 3b)
5501 +#endif
5502 + " .set mips0 \n"
5503 + : "=&r" (temp), "+m" (v->counter)
5504 + : "Ir" (i));
5505 + } else if (kernel_uses_llsc) {
5506 + __asm__ __volatile__(
5507 + " .set mips3 \n"
5508 + "1: ll %0, %1 # atomic_add \n"
5509 +#ifdef CONFIG_PAX_REFCOUNT
5510 + /* Exception on overflow. */
5511 + "2: add %0, %2 \n"
5512 +#else
5513 + " addu %0, %2 \n"
5514 +#endif
5515 + " sc %0, %1 \n"
5516 + " beqz %0, 1b \n"
5517 +#ifdef CONFIG_PAX_REFCOUNT
5518 + "3: \n"
5519 + _ASM_EXTABLE(2b, 3b)
5520 +#endif
5521 + " .set mips0 \n"
5522 + : "=&r" (temp), "+m" (v->counter)
5523 + : "Ir" (i));
5524 + } else {
5525 + unsigned long flags;
5526 +
5527 + raw_local_irq_save(flags);
5528 + __asm__ __volatile__(
5529 +#ifdef CONFIG_PAX_REFCOUNT
5530 + /* Exception on overflow. */
5531 + "1: add %0, %1 \n"
5532 + "2: \n"
5533 + _ASM_EXTABLE(1b, 2b)
5534 +#else
5535 + " addu %0, %1 \n"
5536 +#endif
5537 + : "+r" (v->counter) : "Ir" (i));
5538 + raw_local_irq_restore(flags);
5539 + }
5540 +}
5541 +
5542 +static __inline__ void atomic_add_unchecked(int i, atomic_unchecked_t *v)
5543 {
5544 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5545 int temp;
5546 @@ -90,7 +182,67 @@ static __inline__ void atomic_add(int i, atomic_t * v)
5547 *
5548 * Atomically subtracts @i from @v.
5549 */
5550 -static __inline__ void atomic_sub(int i, atomic_t * v)
5551 +static __inline__ void atomic_sub(int i, atomic_t *v)
5552 +{
5553 + int temp;
5554 +
5555 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
5556 + __asm__ __volatile__(
5557 + " .set mips3 \n"
5558 + "1: ll %0, %1 # atomic64_sub \n"
5559 +#ifdef CONFIG_PAX_REFCOUNT
5560 + /* Exception on overflow. */
5561 + "2: sub %0, %2 \n"
5562 +#else
5563 + " subu %0, %2 \n"
5564 +#endif
5565 + " sc %0, %1 \n"
5566 + " beqzl %0, 1b \n"
5567 +#ifdef CONFIG_PAX_REFCOUNT
5568 + "3: \n"
5569 + _ASM_EXTABLE(2b, 3b)
5570 +#endif
5571 + " .set mips0 \n"
5572 + : "=&r" (temp), "+m" (v->counter)
5573 + : "Ir" (i));
5574 + } else if (kernel_uses_llsc) {
5575 + __asm__ __volatile__(
5576 + " .set mips3 \n"
5577 + "1: ll %0, %1 # atomic64_sub \n"
5578 +#ifdef CONFIG_PAX_REFCOUNT
5579 + /* Exception on overflow. */
5580 + "2: sub %0, %2 \n"
5581 +#else
5582 + " subu %0, %2 \n"
5583 +#endif
5584 + " sc %0, %1 \n"
5585 + " beqz %0, 1b \n"
5586 +#ifdef CONFIG_PAX_REFCOUNT
5587 + "3: \n"
5588 + _ASM_EXTABLE(2b, 3b)
5589 +#endif
5590 + " .set mips0 \n"
5591 + : "=&r" (temp), "+m" (v->counter)
5592 + : "Ir" (i));
5593 + } else {
5594 + unsigned long flags;
5595 +
5596 + raw_local_irq_save(flags);
5597 + __asm__ __volatile__(
5598 +#ifdef CONFIG_PAX_REFCOUNT
5599 + /* Exception on overflow. */
5600 + "1: sub %0, %1 \n"
5601 + "2: \n"
5602 + _ASM_EXTABLE(1b, 2b)
5603 +#else
5604 + " subu %0, %1 \n"
5605 +#endif
5606 + : "+r" (v->counter) : "Ir" (i));
5607 + raw_local_irq_restore(flags);
5608 + }
5609 +}
5610 +
5611 +static __inline__ void atomic_sub_unchecked(long i, atomic_unchecked_t *v)
5612 {
5613 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5614 int temp;
5615 @@ -129,7 +281,93 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
5616 /*
5617 * Same as above, but return the result value
5618 */
5619 -static __inline__ int atomic_add_return(int i, atomic_t * v)
5620 +static __inline__ int atomic_add_return(int i, atomic_t *v)
5621 +{
5622 + int result;
5623 + int temp;
5624 +
5625 + smp_mb__before_llsc();
5626 +
5627 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
5628 + __asm__ __volatile__(
5629 + " .set mips3 \n"
5630 + "1: ll %1, %2 # atomic_add_return \n"
5631 +#ifdef CONFIG_PAX_REFCOUNT
5632 + "2: add %0, %1, %3 \n"
5633 +#else
5634 + " addu %0, %1, %3 \n"
5635 +#endif
5636 + " sc %0, %2 \n"
5637 + " beqzl %0, 1b \n"
5638 +#ifdef CONFIG_PAX_REFCOUNT
5639 + " b 4f \n"
5640 + " .set noreorder \n"
5641 + "3: b 5f \n"
5642 + " move %0, %1 \n"
5643 + " .set reorder \n"
5644 + _ASM_EXTABLE(2b, 3b)
5645 +#endif
5646 + "4: addu %0, %1, %3 \n"
5647 +#ifdef CONFIG_PAX_REFCOUNT
5648 + "5: \n"
5649 +#endif
5650 + " .set mips0 \n"
5651 + : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5652 + : "Ir" (i));
5653 + } else if (kernel_uses_llsc) {
5654 + __asm__ __volatile__(
5655 + " .set mips3 \n"
5656 + "1: ll %1, %2 # atomic_add_return \n"
5657 +#ifdef CONFIG_PAX_REFCOUNT
5658 + "2: add %0, %1, %3 \n"
5659 +#else
5660 + " addu %0, %1, %3 \n"
5661 +#endif
5662 + " sc %0, %2 \n"
5663 + " bnez %0, 4f \n"
5664 + " b 1b \n"
5665 +#ifdef CONFIG_PAX_REFCOUNT
5666 + " .set noreorder \n"
5667 + "3: b 5f \n"
5668 + " move %0, %1 \n"
5669 + " .set reorder \n"
5670 + _ASM_EXTABLE(2b, 3b)
5671 +#endif
5672 + "4: addu %0, %1, %3 \n"
5673 +#ifdef CONFIG_PAX_REFCOUNT
5674 + "5: \n"
5675 +#endif
5676 + " .set mips0 \n"
5677 + : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5678 + : "Ir" (i));
5679 + } else {
5680 + unsigned long flags;
5681 +
5682 + raw_local_irq_save(flags);
5683 + __asm__ __volatile__(
5684 + " lw %0, %1 \n"
5685 +#ifdef CONFIG_PAX_REFCOUNT
5686 + /* Exception on overflow. */
5687 + "1: add %0, %2 \n"
5688 +#else
5689 + " addu %0, %2 \n"
5690 +#endif
5691 + " sw %0, %1 \n"
5692 +#ifdef CONFIG_PAX_REFCOUNT
5693 + /* Note: Dest reg is not modified on overflow */
5694 + "2: \n"
5695 + _ASM_EXTABLE(1b, 2b)
5696 +#endif
5697 + : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5698 + raw_local_irq_restore(flags);
5699 + }
5700 +
5701 + smp_llsc_mb();
5702 +
5703 + return result;
5704 +}
5705 +
5706 +static __inline__ int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
5707 {
5708 int result;
5709
5710 @@ -178,7 +416,93 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
5711 return result;
5712 }
5713
5714 -static __inline__ int atomic_sub_return(int i, atomic_t * v)
5715 +static __inline__ int atomic_sub_return(int i, atomic_t *v)
5716 +{
5717 + int result;
5718 + int temp;
5719 +
5720 + smp_mb__before_llsc();
5721 +
5722 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
5723 + __asm__ __volatile__(
5724 + " .set mips3 \n"
5725 + "1: ll %1, %2 # atomic_sub_return \n"
5726 +#ifdef CONFIG_PAX_REFCOUNT
5727 + "2: sub %0, %1, %3 \n"
5728 +#else
5729 + " subu %0, %1, %3 \n"
5730 +#endif
5731 + " sc %0, %2 \n"
5732 + " beqzl %0, 1b \n"
5733 +#ifdef CONFIG_PAX_REFCOUNT
5734 + " b 4f \n"
5735 + " .set noreorder \n"
5736 + "3: b 5f \n"
5737 + " move %0, %1 \n"
5738 + " .set reorder \n"
5739 + _ASM_EXTABLE(2b, 3b)
5740 +#endif
5741 + "4: subu %0, %1, %3 \n"
5742 +#ifdef CONFIG_PAX_REFCOUNT
5743 + "5: \n"
5744 +#endif
5745 + " .set mips0 \n"
5746 + : "=&r" (result), "=&r" (temp), "=m" (v->counter)
5747 + : "Ir" (i), "m" (v->counter)
5748 + : "memory");
5749 + } else if (kernel_uses_llsc) {
5750 + __asm__ __volatile__(
5751 + " .set mips3 \n"
5752 + "1: ll %1, %2 # atomic_sub_return \n"
5753 +#ifdef CONFIG_PAX_REFCOUNT
5754 + "2: sub %0, %1, %3 \n"
5755 +#else
5756 + " subu %0, %1, %3 \n"
5757 +#endif
5758 + " sc %0, %2 \n"
5759 + " bnez %0, 4f \n"
5760 + " b 1b \n"
5761 +#ifdef CONFIG_PAX_REFCOUNT
5762 + " .set noreorder \n"
5763 + "3: b 5f \n"
5764 + " move %0, %1 \n"
5765 + " .set reorder \n"
5766 + _ASM_EXTABLE(2b, 3b)
5767 +#endif
5768 + "4: subu %0, %1, %3 \n"
5769 +#ifdef CONFIG_PAX_REFCOUNT
5770 + "5: \n"
5771 +#endif
5772 + " .set mips0 \n"
5773 + : "=&r" (result), "=&r" (temp), "+m" (v->counter)
5774 + : "Ir" (i));
5775 + } else {
5776 + unsigned long flags;
5777 +
5778 + raw_local_irq_save(flags);
5779 + __asm__ __volatile__(
5780 + " lw %0, %1 \n"
5781 +#ifdef CONFIG_PAX_REFCOUNT
5782 + /* Exception on overflow. */
5783 + "1: sub %0, %2 \n"
5784 +#else
5785 + " subu %0, %2 \n"
5786 +#endif
5787 + " sw %0, %1 \n"
5788 +#ifdef CONFIG_PAX_REFCOUNT
5789 + /* Note: Dest reg is not modified on overflow */
5790 + "2: \n"
5791 + _ASM_EXTABLE(1b, 2b)
5792 +#endif
5793 + : "=&r" (result), "+m" (v->counter) : "Ir" (i));
5794 + raw_local_irq_restore(flags);
5795 + }
5796 +
5797 + smp_llsc_mb();
5798 +
5799 + return result;
5800 +}
5801 +static __inline__ int atomic_sub_return_unchecked(int i, atomic_unchecked_t *v)
5802 {
5803 int result;
5804
5805 @@ -238,7 +562,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
5806 * Atomically test @v and subtract @i if @v is greater or equal than @i.
5807 * The function returns the old value of @v minus @i.
5808 */
5809 -static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5810 +static __inline__ int atomic_sub_if_positive(int i, atomic_t *v)
5811 {
5812 int result;
5813
5814 @@ -295,8 +619,26 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
5815 return result;
5816 }
5817
5818 -#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
5819 -#define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
5820 +static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
5821 +{
5822 + return cmpxchg(&v->counter, old, new);
5823 +}
5824 +
5825 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old,
5826 + int new)
5827 +{
5828 + return cmpxchg(&(v->counter), old, new);
5829 +}
5830 +
5831 +static inline int atomic_xchg(atomic_t *v, int new)
5832 +{
5833 + return xchg(&v->counter, new);
5834 +}
5835 +
5836 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
5837 +{
5838 + return xchg(&(v->counter), new);
5839 +}
5840
5841 /**
5842 * __atomic_add_unless - add unless the number is a given value
5843 @@ -324,6 +666,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5844
5845 #define atomic_dec_return(v) atomic_sub_return(1, (v))
5846 #define atomic_inc_return(v) atomic_add_return(1, (v))
5847 +static __inline__ int atomic_inc_return_unchecked(atomic_unchecked_t *v)
5848 +{
5849 + return atomic_add_return_unchecked(1, v);
5850 +}
5851
5852 /*
5853 * atomic_sub_and_test - subtract value from variable and test result
5854 @@ -345,6 +691,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5855 * other cases.
5856 */
5857 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
5858 +static __inline__ int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
5859 +{
5860 + return atomic_add_return_unchecked(1, v) == 0;
5861 +}
5862
5863 /*
5864 * atomic_dec_and_test - decrement by 1 and test
5865 @@ -369,6 +719,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5866 * Atomically increments @v by 1.
5867 */
5868 #define atomic_inc(v) atomic_add(1, (v))
5869 +static __inline__ void atomic_inc_unchecked(atomic_unchecked_t *v)
5870 +{
5871 + atomic_add_unchecked(1, v);
5872 +}
5873
5874 /*
5875 * atomic_dec - decrement and test
5876 @@ -377,6 +731,10 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5877 * Atomically decrements @v by 1.
5878 */
5879 #define atomic_dec(v) atomic_sub(1, (v))
5880 +static __inline__ void atomic_dec_unchecked(atomic_unchecked_t *v)
5881 +{
5882 + atomic_sub_unchecked(1, v);
5883 +}
5884
5885 /*
5886 * atomic_add_negative - add and test if negative
5887 @@ -398,14 +756,30 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5888 * @v: pointer of type atomic64_t
5889 *
5890 */
5891 -#define atomic64_read(v) (*(volatile long *)&(v)->counter)
5892 +static inline long atomic64_read(const atomic64_t *v)
5893 +{
5894 + return (*(volatile const long *) &v->counter);
5895 +}
5896 +
5897 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
5898 +{
5899 + return (*(volatile const long *) &v->counter);
5900 +}
5901
5902 /*
5903 * atomic64_set - set atomic variable
5904 * @v: pointer of type atomic64_t
5905 * @i: required value
5906 */
5907 -#define atomic64_set(v, i) ((v)->counter = (i))
5908 +static inline void atomic64_set(atomic64_t *v, long i)
5909 +{
5910 + v->counter = i;
5911 +}
5912 +
5913 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
5914 +{
5915 + v->counter = i;
5916 +}
5917
5918 /*
5919 * atomic64_add - add integer to atomic variable
5920 @@ -414,7 +788,66 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
5921 *
5922 * Atomically adds @i to @v.
5923 */
5924 -static __inline__ void atomic64_add(long i, atomic64_t * v)
5925 +static __inline__ void atomic64_add(long i, atomic64_t *v)
5926 +{
5927 + long temp;
5928 +
5929 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
5930 + __asm__ __volatile__(
5931 + " .set mips3 \n"
5932 + "1: lld %0, %1 # atomic64_add \n"
5933 +#ifdef CONFIG_PAX_REFCOUNT
5934 + /* Exception on overflow. */
5935 + "2: dadd %0, %2 \n"
5936 +#else
5937 + " daddu %0, %2 \n"
5938 +#endif
5939 + " scd %0, %1 \n"
5940 + " beqzl %0, 1b \n"
5941 +#ifdef CONFIG_PAX_REFCOUNT
5942 + "3: \n"
5943 + _ASM_EXTABLE(2b, 3b)
5944 +#endif
5945 + " .set mips0 \n"
5946 + : "=&r" (temp), "+m" (v->counter)
5947 + : "Ir" (i));
5948 + } else if (kernel_uses_llsc) {
5949 + __asm__ __volatile__(
5950 + " .set mips3 \n"
5951 + "1: lld %0, %1 # atomic64_add \n"
5952 +#ifdef CONFIG_PAX_REFCOUNT
5953 + /* Exception on overflow. */
5954 + "2: dadd %0, %2 \n"
5955 +#else
5956 + " daddu %0, %2 \n"
5957 +#endif
5958 + " scd %0, %1 \n"
5959 + " beqz %0, 1b \n"
5960 +#ifdef CONFIG_PAX_REFCOUNT
5961 + "3: \n"
5962 + _ASM_EXTABLE(2b, 3b)
5963 +#endif
5964 + " .set mips0 \n"
5965 + : "=&r" (temp), "+m" (v->counter)
5966 + : "Ir" (i));
5967 + } else {
5968 + unsigned long flags;
5969 +
5970 + raw_local_irq_save(flags);
5971 + __asm__ __volatile__(
5972 +#ifdef CONFIG_PAX_REFCOUNT
5973 + /* Exception on overflow. */
5974 + "1: dadd %0, %1 \n"
5975 + "2: \n"
5976 + _ASM_EXTABLE(1b, 2b)
5977 +#else
5978 + " daddu %0, %1 \n"
5979 +#endif
5980 + : "+r" (v->counter) : "Ir" (i));
5981 + raw_local_irq_restore(flags);
5982 + }
5983 +}
5984 +static __inline__ void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
5985 {
5986 if (kernel_uses_llsc && R10000_LLSC_WAR) {
5987 long temp;
5988 @@ -457,7 +890,67 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
5989 *
5990 * Atomically subtracts @i from @v.
5991 */
5992 -static __inline__ void atomic64_sub(long i, atomic64_t * v)
5993 +static __inline__ void atomic64_sub(long i, atomic64_t *v)
5994 +{
5995 + long temp;
5996 +
5997 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
5998 + __asm__ __volatile__(
5999 + " .set mips3 \n"
6000 + "1: lld %0, %1 # atomic64_sub \n"
6001 +#ifdef CONFIG_PAX_REFCOUNT
6002 + /* Exception on overflow. */
6003 + "2: dsub %0, %2 \n"
6004 +#else
6005 + " dsubu %0, %2 \n"
6006 +#endif
6007 + " scd %0, %1 \n"
6008 + " beqzl %0, 1b \n"
6009 +#ifdef CONFIG_PAX_REFCOUNT
6010 + "3: \n"
6011 + _ASM_EXTABLE(2b, 3b)
6012 +#endif
6013 + " .set mips0 \n"
6014 + : "=&r" (temp), "+m" (v->counter)
6015 + : "Ir" (i));
6016 + } else if (kernel_uses_llsc) {
6017 + __asm__ __volatile__(
6018 + " .set mips3 \n"
6019 + "1: lld %0, %1 # atomic64_sub \n"
6020 +#ifdef CONFIG_PAX_REFCOUNT
6021 + /* Exception on overflow. */
6022 + "2: dsub %0, %2 \n"
6023 +#else
6024 + " dsubu %0, %2 \n"
6025 +#endif
6026 + " scd %0, %1 \n"
6027 + " beqz %0, 1b \n"
6028 +#ifdef CONFIG_PAX_REFCOUNT
6029 + "3: \n"
6030 + _ASM_EXTABLE(2b, 3b)
6031 +#endif
6032 + " .set mips0 \n"
6033 + : "=&r" (temp), "+m" (v->counter)
6034 + : "Ir" (i));
6035 + } else {
6036 + unsigned long flags;
6037 +
6038 + raw_local_irq_save(flags);
6039 + __asm__ __volatile__(
6040 +#ifdef CONFIG_PAX_REFCOUNT
6041 + /* Exception on overflow. */
6042 + "1: dsub %0, %1 \n"
6043 + "2: \n"
6044 + _ASM_EXTABLE(1b, 2b)
6045 +#else
6046 + " dsubu %0, %1 \n"
6047 +#endif
6048 + : "+r" (v->counter) : "Ir" (i));
6049 + raw_local_irq_restore(flags);
6050 + }
6051 +}
6052 +
6053 +static __inline__ void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6054 {
6055 if (kernel_uses_llsc && R10000_LLSC_WAR) {
6056 long temp;
6057 @@ -496,7 +989,93 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
6058 /*
6059 * Same as above, but return the result value
6060 */
6061 -static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6062 +static __inline__ long atomic64_add_return(long i, atomic64_t *v)
6063 +{
6064 + long result;
6065 + long temp;
6066 +
6067 + smp_mb__before_llsc();
6068 +
6069 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
6070 + __asm__ __volatile__(
6071 + " .set mips3 \n"
6072 + "1: lld %1, %2 # atomic64_add_return \n"
6073 +#ifdef CONFIG_PAX_REFCOUNT
6074 + "2: dadd %0, %1, %3 \n"
6075 +#else
6076 + " daddu %0, %1, %3 \n"
6077 +#endif
6078 + " scd %0, %2 \n"
6079 + " beqzl %0, 1b \n"
6080 +#ifdef CONFIG_PAX_REFCOUNT
6081 + " b 4f \n"
6082 + " .set noreorder \n"
6083 + "3: b 5f \n"
6084 + " move %0, %1 \n"
6085 + " .set reorder \n"
6086 + _ASM_EXTABLE(2b, 3b)
6087 +#endif
6088 + "4: daddu %0, %1, %3 \n"
6089 +#ifdef CONFIG_PAX_REFCOUNT
6090 + "5: \n"
6091 +#endif
6092 + " .set mips0 \n"
6093 + : "=&r" (result), "=&r" (temp), "+m" (v->counter)
6094 + : "Ir" (i));
6095 + } else if (kernel_uses_llsc) {
6096 + __asm__ __volatile__(
6097 + " .set mips3 \n"
6098 + "1: lld %1, %2 # atomic64_add_return \n"
6099 +#ifdef CONFIG_PAX_REFCOUNT
6100 + "2: dadd %0, %1, %3 \n"
6101 +#else
6102 + " daddu %0, %1, %3 \n"
6103 +#endif
6104 + " scd %0, %2 \n"
6105 + " bnez %0, 4f \n"
6106 + " b 1b \n"
6107 +#ifdef CONFIG_PAX_REFCOUNT
6108 + " .set noreorder \n"
6109 + "3: b 5f \n"
6110 + " move %0, %1 \n"
6111 + " .set reorder \n"
6112 + _ASM_EXTABLE(2b, 3b)
6113 +#endif
6114 + "4: daddu %0, %1, %3 \n"
6115 +#ifdef CONFIG_PAX_REFCOUNT
6116 + "5: \n"
6117 +#endif
6118 + " .set mips0 \n"
6119 + : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6120 + : "Ir" (i), "m" (v->counter)
6121 + : "memory");
6122 + } else {
6123 + unsigned long flags;
6124 +
6125 + raw_local_irq_save(flags);
6126 + __asm__ __volatile__(
6127 + " ld %0, %1 \n"
6128 +#ifdef CONFIG_PAX_REFCOUNT
6129 + /* Exception on overflow. */
6130 + "1: dadd %0, %2 \n"
6131 +#else
6132 + " daddu %0, %2 \n"
6133 +#endif
6134 + " sd %0, %1 \n"
6135 +#ifdef CONFIG_PAX_REFCOUNT
6136 + /* Note: Dest reg is not modified on overflow */
6137 + "2: \n"
6138 + _ASM_EXTABLE(1b, 2b)
6139 +#endif
6140 + : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6141 + raw_local_irq_restore(flags);
6142 + }
6143 +
6144 + smp_llsc_mb();
6145 +
6146 + return result;
6147 +}
6148 +static __inline__ long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6149 {
6150 long result;
6151
6152 @@ -546,7 +1125,97 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
6153 return result;
6154 }
6155
6156 -static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6157 +static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
6158 +{
6159 + long result;
6160 + long temp;
6161 +
6162 + smp_mb__before_llsc();
6163 +
6164 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
6165 + long temp;
6166 +
6167 + __asm__ __volatile__(
6168 + " .set mips3 \n"
6169 + "1: lld %1, %2 # atomic64_sub_return \n"
6170 +#ifdef CONFIG_PAX_REFCOUNT
6171 + "2: dsub %0, %1, %3 \n"
6172 +#else
6173 + " dsubu %0, %1, %3 \n"
6174 +#endif
6175 + " scd %0, %2 \n"
6176 + " beqzl %0, 1b \n"
6177 +#ifdef CONFIG_PAX_REFCOUNT
6178 + " b 4f \n"
6179 + " .set noreorder \n"
6180 + "3: b 5f \n"
6181 + " move %0, %1 \n"
6182 + " .set reorder \n"
6183 + _ASM_EXTABLE(2b, 3b)
6184 +#endif
6185 + "4: dsubu %0, %1, %3 \n"
6186 +#ifdef CONFIG_PAX_REFCOUNT
6187 + "5: \n"
6188 +#endif
6189 + " .set mips0 \n"
6190 + : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6191 + : "Ir" (i), "m" (v->counter)
6192 + : "memory");
6193 + } else if (kernel_uses_llsc) {
6194 + __asm__ __volatile__(
6195 + " .set mips3 \n"
6196 + "1: lld %1, %2 # atomic64_sub_return \n"
6197 +#ifdef CONFIG_PAX_REFCOUNT
6198 + "2: dsub %0, %1, %3 \n"
6199 +#else
6200 + " dsubu %0, %1, %3 \n"
6201 +#endif
6202 + " scd %0, %2 \n"
6203 + " bnez %0, 4f \n"
6204 + " b 1b \n"
6205 +#ifdef CONFIG_PAX_REFCOUNT
6206 + " .set noreorder \n"
6207 + "3: b 5f \n"
6208 + " move %0, %1 \n"
6209 + " .set reorder \n"
6210 + _ASM_EXTABLE(2b, 3b)
6211 +#endif
6212 + "4: dsubu %0, %1, %3 \n"
6213 +#ifdef CONFIG_PAX_REFCOUNT
6214 + "5: \n"
6215 +#endif
6216 + " .set mips0 \n"
6217 + : "=&r" (result), "=&r" (temp), "=m" (v->counter)
6218 + : "Ir" (i), "m" (v->counter)
6219 + : "memory");
6220 + } else {
6221 + unsigned long flags;
6222 +
6223 + raw_local_irq_save(flags);
6224 + __asm__ __volatile__(
6225 + " ld %0, %1 \n"
6226 +#ifdef CONFIG_PAX_REFCOUNT
6227 + /* Exception on overflow. */
6228 + "1: dsub %0, %2 \n"
6229 +#else
6230 + " dsubu %0, %2 \n"
6231 +#endif
6232 + " sd %0, %1 \n"
6233 +#ifdef CONFIG_PAX_REFCOUNT
6234 + /* Note: Dest reg is not modified on overflow */
6235 + "2: \n"
6236 + _ASM_EXTABLE(1b, 2b)
6237 +#endif
6238 + : "=&r" (result), "+m" (v->counter) : "Ir" (i));
6239 + raw_local_irq_restore(flags);
6240 + }
6241 +
6242 + smp_llsc_mb();
6243 +
6244 + return result;
6245 +}
6246 +
6247 +static __inline__ long atomic64_sub_return_unchecked(long i, atomic64_unchecked_t *v)
6248 {
6249 long result;
6250
6251 @@ -605,7 +1274,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
6252 * Atomically test @v and subtract @i if @v is greater or equal than @i.
6253 * The function returns the old value of @v minus @i.
6254 */
6255 -static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6256 +static __inline__ long atomic64_sub_if_positive(long i, atomic64_t *v)
6257 {
6258 long result;
6259
6260 @@ -662,9 +1331,26 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
6261 return result;
6262 }
6263
6264 -#define atomic64_cmpxchg(v, o, n) \
6265 - ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
6266 -#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
6267 +static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6268 +{
6269 + return cmpxchg(&v->counter, old, new);
6270 +}
6271 +
6272 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old,
6273 + long new)
6274 +{
6275 + return cmpxchg(&(v->counter), old, new);
6276 +}
6277 +
6278 +static inline long atomic64_xchg(atomic64_t *v, long new)
6279 +{
6280 + return xchg(&v->counter, new);
6281 +}
6282 +
6283 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
6284 +{
6285 + return xchg(&(v->counter), new);
6286 +}
6287
6288 /**
6289 * atomic64_add_unless - add unless the number is a given value
6290 @@ -694,6 +1380,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6291
6292 #define atomic64_dec_return(v) atomic64_sub_return(1, (v))
6293 #define atomic64_inc_return(v) atomic64_add_return(1, (v))
6294 +#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1, (v))
6295
6296 /*
6297 * atomic64_sub_and_test - subtract value from variable and test result
6298 @@ -715,6 +1402,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6299 * other cases.
6300 */
6301 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
6302 +#define atomic64_inc_and_test_unchecked(v) (atomic64_add_return_unchecked(1, (v)) == 0)
6303
6304 /*
6305 * atomic64_dec_and_test - decrement by 1 and test
6306 @@ -739,6 +1427,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6307 * Atomically increments @v by 1.
6308 */
6309 #define atomic64_inc(v) atomic64_add(1, (v))
6310 +#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1, (v))
6311
6312 /*
6313 * atomic64_dec - decrement and test
6314 @@ -747,6 +1436,7 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
6315 * Atomically decrements @v by 1.
6316 */
6317 #define atomic64_dec(v) atomic64_sub(1, (v))
6318 +#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1, (v))
6319
6320 /*
6321 * atomic64_add_negative - add and test if negative
6322 diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
6323 index b4db69f..8f3b093 100644
6324 --- a/arch/mips/include/asm/cache.h
6325 +++ b/arch/mips/include/asm/cache.h
6326 @@ -9,10 +9,11 @@
6327 #ifndef _ASM_CACHE_H
6328 #define _ASM_CACHE_H
6329
6330 +#include <linux/const.h>
6331 #include <kmalloc.h>
6332
6333 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
6334 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6335 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6336
6337 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
6338 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6339 diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
6340 index cf3ae24..238d22f 100644
6341 --- a/arch/mips/include/asm/elf.h
6342 +++ b/arch/mips/include/asm/elf.h
6343 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
6344 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
6345 #endif
6346
6347 +#ifdef CONFIG_PAX_ASLR
6348 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6349 +
6350 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6351 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6352 +#endif
6353 +
6354 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
6355 struct linux_binprm;
6356 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6357 int uses_interp);
6358
6359 -struct mm_struct;
6360 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6361 -#define arch_randomize_brk arch_randomize_brk
6362 -
6363 #endif /* _ASM_ELF_H */
6364 diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
6365 index c1f6afa..38cc6e9 100644
6366 --- a/arch/mips/include/asm/exec.h
6367 +++ b/arch/mips/include/asm/exec.h
6368 @@ -12,6 +12,6 @@
6369 #ifndef _ASM_EXEC_H
6370 #define _ASM_EXEC_H
6371
6372 -extern unsigned long arch_align_stack(unsigned long sp);
6373 +#define arch_align_stack(x) ((x) & ~0xfUL)
6374
6375 #endif /* _ASM_EXEC_H */
6376 diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
6377 index d44622c..64990d2 100644
6378 --- a/arch/mips/include/asm/local.h
6379 +++ b/arch/mips/include/asm/local.h
6380 @@ -12,15 +12,25 @@ typedef struct
6381 atomic_long_t a;
6382 } local_t;
6383
6384 +typedef struct {
6385 + atomic_long_unchecked_t a;
6386 +} local_unchecked_t;
6387 +
6388 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
6389
6390 #define local_read(l) atomic_long_read(&(l)->a)
6391 +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
6392 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
6393 +#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
6394
6395 #define local_add(i, l) atomic_long_add((i), (&(l)->a))
6396 +#define local_add_unchecked(i, l) atomic_long_add_unchecked((i), (&(l)->a))
6397 #define local_sub(i, l) atomic_long_sub((i), (&(l)->a))
6398 +#define local_sub_unchecked(i, l) atomic_long_sub_unchecked((i), (&(l)->a))
6399 #define local_inc(l) atomic_long_inc(&(l)->a)
6400 +#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
6401 #define local_dec(l) atomic_long_dec(&(l)->a)
6402 +#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
6403
6404 /*
6405 * Same as above, but return the result value
6406 @@ -70,6 +80,51 @@ static __inline__ long local_add_return(long i, local_t * l)
6407 return result;
6408 }
6409
6410 +static __inline__ long local_add_return_unchecked(long i, local_unchecked_t * l)
6411 +{
6412 + unsigned long result;
6413 +
6414 + if (kernel_uses_llsc && R10000_LLSC_WAR) {
6415 + unsigned long temp;
6416 +
6417 + __asm__ __volatile__(
6418 + " .set mips3 \n"
6419 + "1:" __LL "%1, %2 # local_add_return \n"
6420 + " addu %0, %1, %3 \n"
6421 + __SC "%0, %2 \n"
6422 + " beqzl %0, 1b \n"
6423 + " addu %0, %1, %3 \n"
6424 + " .set mips0 \n"
6425 + : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6426 + : "Ir" (i), "m" (l->a.counter)
6427 + : "memory");
6428 + } else if (kernel_uses_llsc) {
6429 + unsigned long temp;
6430 +
6431 + __asm__ __volatile__(
6432 + " .set mips3 \n"
6433 + "1:" __LL "%1, %2 # local_add_return \n"
6434 + " addu %0, %1, %3 \n"
6435 + __SC "%0, %2 \n"
6436 + " beqz %0, 1b \n"
6437 + " addu %0, %1, %3 \n"
6438 + " .set mips0 \n"
6439 + : "=&r" (result), "=&r" (temp), "=m" (l->a.counter)
6440 + : "Ir" (i), "m" (l->a.counter)
6441 + : "memory");
6442 + } else {
6443 + unsigned long flags;
6444 +
6445 + local_irq_save(flags);
6446 + result = l->a.counter;
6447 + result += i;
6448 + l->a.counter = result;
6449 + local_irq_restore(flags);
6450 + }
6451 +
6452 + return result;
6453 +}
6454 +
6455 static __inline__ long local_sub_return(long i, local_t * l)
6456 {
6457 unsigned long result;
6458 @@ -117,6 +172,8 @@ static __inline__ long local_sub_return(long i, local_t * l)
6459
6460 #define local_cmpxchg(l, o, n) \
6461 ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6462 +#define local_cmpxchg_unchecked(l, o, n) \
6463 + ((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
6464 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
6465
6466 /**
6467 diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
6468 index f6be474..12ad554 100644
6469 --- a/arch/mips/include/asm/page.h
6470 +++ b/arch/mips/include/asm/page.h
6471 @@ -95,7 +95,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
6472 #ifdef CONFIG_CPU_MIPS32
6473 typedef struct { unsigned long pte_low, pte_high; } pte_t;
6474 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
6475 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
6476 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
6477 #else
6478 typedef struct { unsigned long long pte; } pte_t;
6479 #define pte_val(x) ((x).pte)
6480 diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
6481 index 881d18b..cea38bc 100644
6482 --- a/arch/mips/include/asm/pgalloc.h
6483 +++ b/arch/mips/include/asm/pgalloc.h
6484 @@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6485 {
6486 set_pud(pud, __pud((unsigned long)pmd));
6487 }
6488 +
6489 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6490 +{
6491 + pud_populate(mm, pud, pmd);
6492 +}
6493 #endif
6494
6495 /*
6496 diff --git a/arch/mips/include/asm/smtc_proc.h b/arch/mips/include/asm/smtc_proc.h
6497 index 25da651..ae2a259 100644
6498 --- a/arch/mips/include/asm/smtc_proc.h
6499 +++ b/arch/mips/include/asm/smtc_proc.h
6500 @@ -18,6 +18,6 @@ extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
6501
6502 /* Count of number of recoveries of "stolen" FPU access rights on 34K */
6503
6504 -extern atomic_t smtc_fpu_recoveries;
6505 +extern atomic_unchecked_t smtc_fpu_recoveries;
6506
6507 #endif /* __ASM_SMTC_PROC_H */
6508 diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
6509 index 61215a3..213ee0e 100644
6510 --- a/arch/mips/include/asm/thread_info.h
6511 +++ b/arch/mips/include/asm/thread_info.h
6512 @@ -116,6 +116,8 @@ static inline struct thread_info *current_thread_info(void)
6513 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
6514 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
6515 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
6516 +/* li takes a 32bit immediate */
6517 +#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
6518 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
6519
6520 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6521 @@ -132,20 +134,18 @@ static inline struct thread_info *current_thread_info(void)
6522 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
6523 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
6524 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
6525 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6526
6527 -#define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6528 - _TIF_SYSCALL_AUDIT)
6529 +#define _TIF_WORK_SYSCALL_ENTRY (_TIF_NOHZ | _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
6530
6531 /* work to do in syscall_trace_leave() */
6532 -#define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | \
6533 - _TIF_SYSCALL_AUDIT)
6534 +#define _TIF_WORK_SYSCALL_EXIT (_TIF_NOHZ | _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
6535
6536 /* work to do on interrupt/exception return */
6537 #define _TIF_WORK_MASK \
6538 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
6539 /* work to do on any return to u-space */
6540 -#define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | \
6541 - _TIF_WORK_SYSCALL_EXIT)
6542 +#define _TIF_ALLWORK_MASK (_TIF_NOHZ | _TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
6543
6544 #endif /* __KERNEL__ */
6545
6546 diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
6547 index 1188e00..41cf144 100644
6548 --- a/arch/mips/kernel/binfmt_elfn32.c
6549 +++ b/arch/mips/kernel/binfmt_elfn32.c
6550 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6551 #undef ELF_ET_DYN_BASE
6552 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6553
6554 +#ifdef CONFIG_PAX_ASLR
6555 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6556 +
6557 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6558 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6559 +#endif
6560 +
6561 #include <asm/processor.h>
6562 #include <linux/module.h>
6563 #include <linux/elfcore.h>
6564 diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
6565 index 202e581..689ca79 100644
6566 --- a/arch/mips/kernel/binfmt_elfo32.c
6567 +++ b/arch/mips/kernel/binfmt_elfo32.c
6568 @@ -56,6 +56,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
6569 #undef ELF_ET_DYN_BASE
6570 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
6571
6572 +#ifdef CONFIG_PAX_ASLR
6573 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
6574 +
6575 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6576 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
6577 +#endif
6578 +
6579 #include <asm/processor.h>
6580
6581 /*
6582 diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
6583 index d1fea7a..45602ea 100644
6584 --- a/arch/mips/kernel/irq.c
6585 +++ b/arch/mips/kernel/irq.c
6586 @@ -77,17 +77,17 @@ void ack_bad_irq(unsigned int irq)
6587 printk("unexpected IRQ # %d\n", irq);
6588 }
6589
6590 -atomic_t irq_err_count;
6591 +atomic_unchecked_t irq_err_count;
6592
6593 int arch_show_interrupts(struct seq_file *p, int prec)
6594 {
6595 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
6596 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
6597 return 0;
6598 }
6599
6600 asmlinkage void spurious_interrupt(void)
6601 {
6602 - atomic_inc(&irq_err_count);
6603 + atomic_inc_unchecked(&irq_err_count);
6604 }
6605
6606 void __init init_IRQ(void)
6607 diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
6608 index ddc7610..8c58f17 100644
6609 --- a/arch/mips/kernel/process.c
6610 +++ b/arch/mips/kernel/process.c
6611 @@ -566,15 +566,3 @@ unsigned long get_wchan(struct task_struct *task)
6612 out:
6613 return pc;
6614 }
6615 -
6616 -/*
6617 - * Don't forget that the stack pointer must be aligned on a 8 bytes
6618 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
6619 - */
6620 -unsigned long arch_align_stack(unsigned long sp)
6621 -{
6622 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6623 - sp -= get_random_int() & ~PAGE_MASK;
6624 -
6625 - return sp & ALMASK;
6626 -}
6627 diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
6628 index 8ae1ebe..1bcbf47 100644
6629 --- a/arch/mips/kernel/ptrace.c
6630 +++ b/arch/mips/kernel/ptrace.c
6631 @@ -529,6 +529,10 @@ static inline int audit_arch(void)
6632 return arch;
6633 }
6634
6635 +#ifdef CONFIG_GRKERNSEC_SETXID
6636 +extern void gr_delayed_cred_worker(void);
6637 +#endif
6638 +
6639 /*
6640 * Notification of system call entry/exit
6641 * - triggered by current->work.syscall_trace
6642 @@ -540,6 +544,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
6643 /* do the secure computing check first */
6644 secure_computing_strict(regs->regs[2]);
6645
6646 +#ifdef CONFIG_GRKERNSEC_SETXID
6647 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6648 + gr_delayed_cred_worker();
6649 +#endif
6650 +
6651 if (!(current->ptrace & PT_PTRACED))
6652 goto out;
6653
6654 diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
6655 index c10aa84..9ec2e60 100644
6656 --- a/arch/mips/kernel/smtc-proc.c
6657 +++ b/arch/mips/kernel/smtc-proc.c
6658 @@ -31,7 +31,7 @@ unsigned long selfipis[NR_CPUS];
6659
6660 struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
6661
6662 -atomic_t smtc_fpu_recoveries;
6663 +atomic_unchecked_t smtc_fpu_recoveries;
6664
6665 static int smtc_proc_show(struct seq_file *m, void *v)
6666 {
6667 @@ -48,7 +48,7 @@ static int smtc_proc_show(struct seq_file *m, void *v)
6668 for(i = 0; i < NR_CPUS; i++)
6669 seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
6670 seq_printf(m, "%d Recoveries of \"stolen\" FPU\n",
6671 - atomic_read(&smtc_fpu_recoveries));
6672 + atomic_read_unchecked(&smtc_fpu_recoveries));
6673 return 0;
6674 }
6675
6676 @@ -73,7 +73,7 @@ void init_smtc_stats(void)
6677 smtc_cpu_stats[i].selfipis = 0;
6678 }
6679
6680 - atomic_set(&smtc_fpu_recoveries, 0);
6681 + atomic_set_unchecked(&smtc_fpu_recoveries, 0);
6682
6683 proc_create("smtc", 0444, NULL, &smtc_proc_fops);
6684 }
6685 diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
6686 index dfc1b91..11a2c07 100644
6687 --- a/arch/mips/kernel/smtc.c
6688 +++ b/arch/mips/kernel/smtc.c
6689 @@ -1359,7 +1359,7 @@ void smtc_soft_dump(void)
6690 }
6691 smtc_ipi_qdump();
6692 printk("%d Recoveries of \"stolen\" FPU\n",
6693 - atomic_read(&smtc_fpu_recoveries));
6694 + atomic_read_unchecked(&smtc_fpu_recoveries));
6695 }
6696
6697
6698 diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
6699 index 84536bf..79caa4d 100644
6700 --- a/arch/mips/kernel/sync-r4k.c
6701 +++ b/arch/mips/kernel/sync-r4k.c
6702 @@ -21,8 +21,8 @@
6703 #include <asm/mipsregs.h>
6704
6705 static atomic_t count_start_flag = ATOMIC_INIT(0);
6706 -static atomic_t count_count_start = ATOMIC_INIT(0);
6707 -static atomic_t count_count_stop = ATOMIC_INIT(0);
6708 +static atomic_unchecked_t count_count_start = ATOMIC_INIT(0);
6709 +static atomic_unchecked_t count_count_stop = ATOMIC_INIT(0);
6710 static atomic_t count_reference = ATOMIC_INIT(0);
6711
6712 #define COUNTON 100
6713 @@ -69,13 +69,13 @@ void synchronise_count_master(int cpu)
6714
6715 for (i = 0; i < NR_LOOPS; i++) {
6716 /* slaves loop on '!= 2' */
6717 - while (atomic_read(&count_count_start) != 1)
6718 + while (atomic_read_unchecked(&count_count_start) != 1)
6719 mb();
6720 - atomic_set(&count_count_stop, 0);
6721 + atomic_set_unchecked(&count_count_stop, 0);
6722 smp_wmb();
6723
6724 /* this lets the slaves write their count register */
6725 - atomic_inc(&count_count_start);
6726 + atomic_inc_unchecked(&count_count_start);
6727
6728 /*
6729 * Everyone initialises count in the last loop:
6730 @@ -86,11 +86,11 @@ void synchronise_count_master(int cpu)
6731 /*
6732 * Wait for all slaves to leave the synchronization point:
6733 */
6734 - while (atomic_read(&count_count_stop) != 1)
6735 + while (atomic_read_unchecked(&count_count_stop) != 1)
6736 mb();
6737 - atomic_set(&count_count_start, 0);
6738 + atomic_set_unchecked(&count_count_start, 0);
6739 smp_wmb();
6740 - atomic_inc(&count_count_stop);
6741 + atomic_inc_unchecked(&count_count_stop);
6742 }
6743 /* Arrange for an interrupt in a short while */
6744 write_c0_compare(read_c0_count() + COUNTON);
6745 @@ -131,8 +131,8 @@ void synchronise_count_slave(int cpu)
6746 initcount = atomic_read(&count_reference);
6747
6748 for (i = 0; i < NR_LOOPS; i++) {
6749 - atomic_inc(&count_count_start);
6750 - while (atomic_read(&count_count_start) != 2)
6751 + atomic_inc_unchecked(&count_count_start);
6752 + while (atomic_read_unchecked(&count_count_start) != 2)
6753 mb();
6754
6755 /*
6756 @@ -141,8 +141,8 @@ void synchronise_count_slave(int cpu)
6757 if (i == NR_LOOPS-1)
6758 write_c0_count(initcount);
6759
6760 - atomic_inc(&count_count_stop);
6761 - while (atomic_read(&count_count_stop) != 2)
6762 + atomic_inc_unchecked(&count_count_stop);
6763 + while (atomic_read_unchecked(&count_count_stop) != 2)
6764 mb();
6765 }
6766 /* Arrange for an interrupt in a short while */
6767 diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
6768 index 524841f..3eef41e 100644
6769 --- a/arch/mips/kernel/traps.c
6770 +++ b/arch/mips/kernel/traps.c
6771 @@ -684,7 +684,18 @@ asmlinkage void do_ov(struct pt_regs *regs)
6772 siginfo_t info;
6773
6774 prev_state = exception_enter();
6775 - die_if_kernel("Integer overflow", regs);
6776 + if (unlikely(!user_mode(regs))) {
6777 +
6778 +#ifdef CONFIG_PAX_REFCOUNT
6779 + if (fixup_exception(regs)) {
6780 + pax_report_refcount_overflow(regs);
6781 + exception_exit(prev_state);
6782 + return;
6783 + }
6784 +#endif
6785 +
6786 + die("Integer overflow", regs);
6787 + }
6788
6789 info.si_code = FPE_INTOVF;
6790 info.si_signo = SIGFPE;
6791 diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
6792 index becc42b..9e43d4b 100644
6793 --- a/arch/mips/mm/fault.c
6794 +++ b/arch/mips/mm/fault.c
6795 @@ -28,6 +28,23 @@
6796 #include <asm/highmem.h> /* For VMALLOC_END */
6797 #include <linux/kdebug.h>
6798
6799 +#ifdef CONFIG_PAX_PAGEEXEC
6800 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6801 +{
6802 + unsigned long i;
6803 +
6804 + printk(KERN_ERR "PAX: bytes at PC: ");
6805 + for (i = 0; i < 5; i++) {
6806 + unsigned int c;
6807 + if (get_user(c, (unsigned int *)pc+i))
6808 + printk(KERN_CONT "???????? ");
6809 + else
6810 + printk(KERN_CONT "%08x ", c);
6811 + }
6812 + printk("\n");
6813 +}
6814 +#endif
6815 +
6816 /*
6817 * This routine handles page faults. It determines the address,
6818 * and the problem, and then passes it off to one of the appropriate
6819 @@ -199,6 +216,14 @@ bad_area:
6820 bad_area_nosemaphore:
6821 /* User mode accesses just cause a SIGSEGV */
6822 if (user_mode(regs)) {
6823 +
6824 +#ifdef CONFIG_PAX_PAGEEXEC
6825 + if (cpu_has_rixi && (mm->pax_flags & MF_PAX_PAGEEXEC) && !write && address == instruction_pointer(regs)) {
6826 + pax_report_fault(regs, (void *)address, (void *)user_stack_pointer(regs));
6827 + do_group_exit(SIGKILL);
6828 + }
6829 +#endif
6830 +
6831 tsk->thread.cp0_badvaddr = address;
6832 tsk->thread.error_code = write;
6833 #if 0
6834 diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
6835 index f1baadd..8537544 100644
6836 --- a/arch/mips/mm/mmap.c
6837 +++ b/arch/mips/mm/mmap.c
6838 @@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6839 struct vm_area_struct *vma;
6840 unsigned long addr = addr0;
6841 int do_color_align;
6842 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
6843 struct vm_unmapped_area_info info;
6844
6845 if (unlikely(len > TASK_SIZE))
6846 @@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6847 do_color_align = 1;
6848
6849 /* requesting a specific address */
6850 +
6851 +#ifdef CONFIG_PAX_RANDMMAP
6852 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
6853 +#endif
6854 +
6855 if (addr) {
6856 if (do_color_align)
6857 addr = COLOUR_ALIGN(addr, pgoff);
6858 @@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
6859 addr = PAGE_ALIGN(addr);
6860
6861 vma = find_vma(mm, addr);
6862 - if (TASK_SIZE - len >= addr &&
6863 - (!vma || addr + len <= vma->vm_start))
6864 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len, offset))
6865 return addr;
6866 }
6867
6868 info.length = len;
6869 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
6870 info.align_offset = pgoff << PAGE_SHIFT;
6871 + info.threadstack_offset = offset;
6872
6873 if (dir == DOWN) {
6874 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
6875 @@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6876 {
6877 unsigned long random_factor = 0UL;
6878
6879 +#ifdef CONFIG_PAX_RANDMMAP
6880 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6881 +#endif
6882 +
6883 if (current->flags & PF_RANDOMIZE) {
6884 random_factor = get_random_int();
6885 random_factor = random_factor << PAGE_SHIFT;
6886 @@ -157,40 +167,25 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6887
6888 if (mmap_is_legacy()) {
6889 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
6890 +
6891 +#ifdef CONFIG_PAX_RANDMMAP
6892 + if (mm->pax_flags & MF_PAX_RANDMMAP)
6893 + mm->mmap_base += mm->delta_mmap;
6894 +#endif
6895 +
6896 mm->get_unmapped_area = arch_get_unmapped_area;
6897 } else {
6898 mm->mmap_base = mmap_base(random_factor);
6899 +
6900 +#ifdef CONFIG_PAX_RANDMMAP
6901 + if (mm->pax_flags & MF_PAX_RANDMMAP)
6902 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6903 +#endif
6904 +
6905 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6906 }
6907 }
6908
6909 -static inline unsigned long brk_rnd(void)
6910 -{
6911 - unsigned long rnd = get_random_int();
6912 -
6913 - rnd = rnd << PAGE_SHIFT;
6914 - /* 8MB for 32bit, 256MB for 64bit */
6915 - if (TASK_IS_32BIT_ADDR)
6916 - rnd = rnd & 0x7ffffful;
6917 - else
6918 - rnd = rnd & 0xffffffful;
6919 -
6920 - return rnd;
6921 -}
6922 -
6923 -unsigned long arch_randomize_brk(struct mm_struct *mm)
6924 -{
6925 - unsigned long base = mm->brk;
6926 - unsigned long ret;
6927 -
6928 - ret = PAGE_ALIGN(base + brk_rnd());
6929 -
6930 - if (ret < mm->brk)
6931 - return mm->brk;
6932 -
6933 - return ret;
6934 -}
6935 -
6936 int __virt_addr_valid(const volatile void *kaddr)
6937 {
6938 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
6939 diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
6940 index a2358b4..7cead4f 100644
6941 --- a/arch/mips/sgi-ip27/ip27-nmi.c
6942 +++ b/arch/mips/sgi-ip27/ip27-nmi.c
6943 @@ -187,9 +187,9 @@ void
6944 cont_nmi_dump(void)
6945 {
6946 #ifndef REAL_NMI_SIGNAL
6947 - static atomic_t nmied_cpus = ATOMIC_INIT(0);
6948 + static atomic_unchecked_t nmied_cpus = ATOMIC_INIT(0);
6949
6950 - atomic_inc(&nmied_cpus);
6951 + atomic_inc_unchecked(&nmied_cpus);
6952 #endif
6953 /*
6954 * Only allow 1 cpu to proceed
6955 @@ -233,7 +233,7 @@ cont_nmi_dump(void)
6956 udelay(10000);
6957 }
6958 #else
6959 - while (atomic_read(&nmied_cpus) != num_online_cpus());
6960 + while (atomic_read_unchecked(&nmied_cpus) != num_online_cpus());
6961 #endif
6962
6963 /*
6964 diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
6965 index 967d144..db12197 100644
6966 --- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
6967 +++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
6968 @@ -11,12 +11,14 @@
6969 #ifndef _ASM_PROC_CACHE_H
6970 #define _ASM_PROC_CACHE_H
6971
6972 +#include <linux/const.h>
6973 +
6974 /* L1 cache */
6975
6976 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
6977 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
6978 -#define L1_CACHE_BYTES 16 /* bytes per entry */
6979 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
6980 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
6981 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
6982
6983 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
6984 diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6985 index bcb5df2..84fabd2 100644
6986 --- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6987 +++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
6988 @@ -16,13 +16,15 @@
6989 #ifndef _ASM_PROC_CACHE_H
6990 #define _ASM_PROC_CACHE_H
6991
6992 +#include <linux/const.h>
6993 +
6994 /*
6995 * L1 cache
6996 */
6997 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
6998 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
6999 -#define L1_CACHE_BYTES 32 /* bytes per entry */
7000 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
7001 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
7002 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
7003
7004 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
7005 diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
7006 index 4ce7a01..449202a 100644
7007 --- a/arch/openrisc/include/asm/cache.h
7008 +++ b/arch/openrisc/include/asm/cache.h
7009 @@ -19,11 +19,13 @@
7010 #ifndef __ASM_OPENRISC_CACHE_H
7011 #define __ASM_OPENRISC_CACHE_H
7012
7013 +#include <linux/const.h>
7014 +
7015 /* FIXME: How can we replace these with values from the CPU...
7016 * they shouldn't be hard-coded!
7017 */
7018
7019 -#define L1_CACHE_BYTES 16
7020 #define L1_CACHE_SHIFT 4
7021 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7022
7023 #endif /* __ASM_OPENRISC_CACHE_H */
7024 diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
7025 index 472886c..00e7df9 100644
7026 --- a/arch/parisc/include/asm/atomic.h
7027 +++ b/arch/parisc/include/asm/atomic.h
7028 @@ -252,6 +252,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
7029 return dec;
7030 }
7031
7032 +#define atomic64_read_unchecked(v) atomic64_read(v)
7033 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7034 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7035 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7036 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7037 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
7038 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7039 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
7040 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7041 +
7042 #endif /* !CONFIG_64BIT */
7043
7044
7045 diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
7046 index 47f11c7..3420df2 100644
7047 --- a/arch/parisc/include/asm/cache.h
7048 +++ b/arch/parisc/include/asm/cache.h
7049 @@ -5,6 +5,7 @@
7050 #ifndef __ARCH_PARISC_CACHE_H
7051 #define __ARCH_PARISC_CACHE_H
7052
7053 +#include <linux/const.h>
7054
7055 /*
7056 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
7057 @@ -15,13 +16,13 @@
7058 * just ruin performance.
7059 */
7060 #ifdef CONFIG_PA20
7061 -#define L1_CACHE_BYTES 64
7062 #define L1_CACHE_SHIFT 6
7063 #else
7064 -#define L1_CACHE_BYTES 32
7065 #define L1_CACHE_SHIFT 5
7066 #endif
7067
7068 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7069 +
7070 #ifndef __ASSEMBLY__
7071
7072 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7073 diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
7074 index ad2b503..bdf1651 100644
7075 --- a/arch/parisc/include/asm/elf.h
7076 +++ b/arch/parisc/include/asm/elf.h
7077 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
7078
7079 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
7080
7081 +#ifdef CONFIG_PAX_ASLR
7082 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
7083 +
7084 +#define PAX_DELTA_MMAP_LEN 16
7085 +#define PAX_DELTA_STACK_LEN 16
7086 +#endif
7087 +
7088 /* This yields a mask that user programs can use to figure out what
7089 instruction set this CPU supports. This could be done in user space,
7090 but it's not easy, and we've already done it here. */
7091 diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
7092 index fc987a1..6e068ef 100644
7093 --- a/arch/parisc/include/asm/pgalloc.h
7094 +++ b/arch/parisc/include/asm/pgalloc.h
7095 @@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7096 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
7097 }
7098
7099 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
7100 +{
7101 + pgd_populate(mm, pgd, pmd);
7102 +}
7103 +
7104 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
7105 {
7106 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
7107 @@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
7108 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
7109 #define pmd_free(mm, x) do { } while (0)
7110 #define pgd_populate(mm, pmd, pte) BUG()
7111 +#define pgd_populate_kernel(mm, pmd, pte) BUG()
7112
7113 #endif
7114
7115 diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
7116 index 34899b5..02dd060 100644
7117 --- a/arch/parisc/include/asm/pgtable.h
7118 +++ b/arch/parisc/include/asm/pgtable.h
7119 @@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
7120 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
7121 #define PAGE_COPY PAGE_EXECREAD
7122 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
7123 +
7124 +#ifdef CONFIG_PAX_PAGEEXEC
7125 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
7126 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7127 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
7128 +#else
7129 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
7130 +# define PAGE_COPY_NOEXEC PAGE_COPY
7131 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
7132 +#endif
7133 +
7134 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
7135 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
7136 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
7137 diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
7138 index e0a8235..ce2f1e1 100644
7139 --- a/arch/parisc/include/asm/uaccess.h
7140 +++ b/arch/parisc/include/asm/uaccess.h
7141 @@ -245,10 +245,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
7142 const void __user *from,
7143 unsigned long n)
7144 {
7145 - int sz = __compiletime_object_size(to);
7146 + size_t sz = __compiletime_object_size(to);
7147 int ret = -EFAULT;
7148
7149 - if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
7150 + if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
7151 ret = __copy_from_user(to, from, n);
7152 else
7153 copy_from_user_overflow();
7154 diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
7155 index 2a625fb..9908930 100644
7156 --- a/arch/parisc/kernel/module.c
7157 +++ b/arch/parisc/kernel/module.c
7158 @@ -98,16 +98,38 @@
7159
7160 /* three functions to determine where in the module core
7161 * or init pieces the location is */
7162 +static inline int in_init_rx(struct module *me, void *loc)
7163 +{
7164 + return (loc >= me->module_init_rx &&
7165 + loc < (me->module_init_rx + me->init_size_rx));
7166 +}
7167 +
7168 +static inline int in_init_rw(struct module *me, void *loc)
7169 +{
7170 + return (loc >= me->module_init_rw &&
7171 + loc < (me->module_init_rw + me->init_size_rw));
7172 +}
7173 +
7174 static inline int in_init(struct module *me, void *loc)
7175 {
7176 - return (loc >= me->module_init &&
7177 - loc <= (me->module_init + me->init_size));
7178 + return in_init_rx(me, loc) || in_init_rw(me, loc);
7179 +}
7180 +
7181 +static inline int in_core_rx(struct module *me, void *loc)
7182 +{
7183 + return (loc >= me->module_core_rx &&
7184 + loc < (me->module_core_rx + me->core_size_rx));
7185 +}
7186 +
7187 +static inline int in_core_rw(struct module *me, void *loc)
7188 +{
7189 + return (loc >= me->module_core_rw &&
7190 + loc < (me->module_core_rw + me->core_size_rw));
7191 }
7192
7193 static inline int in_core(struct module *me, void *loc)
7194 {
7195 - return (loc >= me->module_core &&
7196 - loc <= (me->module_core + me->core_size));
7197 + return in_core_rx(me, loc) || in_core_rw(me, loc);
7198 }
7199
7200 static inline int in_local(struct module *me, void *loc)
7201 @@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
7202 }
7203
7204 /* align things a bit */
7205 - me->core_size = ALIGN(me->core_size, 16);
7206 - me->arch.got_offset = me->core_size;
7207 - me->core_size += gots * sizeof(struct got_entry);
7208 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
7209 + me->arch.got_offset = me->core_size_rw;
7210 + me->core_size_rw += gots * sizeof(struct got_entry);
7211
7212 - me->core_size = ALIGN(me->core_size, 16);
7213 - me->arch.fdesc_offset = me->core_size;
7214 - me->core_size += fdescs * sizeof(Elf_Fdesc);
7215 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
7216 + me->arch.fdesc_offset = me->core_size_rw;
7217 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
7218
7219 me->arch.got_max = gots;
7220 me->arch.fdesc_max = fdescs;
7221 @@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7222
7223 BUG_ON(value == 0);
7224
7225 - got = me->module_core + me->arch.got_offset;
7226 + got = me->module_core_rw + me->arch.got_offset;
7227 for (i = 0; got[i].addr; i++)
7228 if (got[i].addr == value)
7229 goto out;
7230 @@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
7231 #ifdef CONFIG_64BIT
7232 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7233 {
7234 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
7235 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
7236
7237 if (!value) {
7238 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
7239 @@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
7240
7241 /* Create new one */
7242 fdesc->addr = value;
7243 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7244 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7245 return (Elf_Addr)fdesc;
7246 }
7247 #endif /* CONFIG_64BIT */
7248 @@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
7249
7250 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
7251 end = table + sechdrs[me->arch.unwind_section].sh_size;
7252 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
7253 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
7254
7255 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
7256 me->arch.unwind_section, table, end, gp);
7257 diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
7258 index 0d3a9d4..44975d0 100644
7259 --- a/arch/parisc/kernel/sys_parisc.c
7260 +++ b/arch/parisc/kernel/sys_parisc.c
7261 @@ -33,9 +33,11 @@
7262 #include <linux/utsname.h>
7263 #include <linux/personality.h>
7264
7265 -static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7266 +static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
7267 + unsigned long flags)
7268 {
7269 struct vm_unmapped_area_info info;
7270 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7271
7272 info.flags = 0;
7273 info.length = len;
7274 @@ -43,6 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
7275 info.high_limit = TASK_SIZE;
7276 info.align_mask = 0;
7277 info.align_offset = 0;
7278 + info.threadstack_offset = offset;
7279 return vm_unmapped_area(&info);
7280 }
7281
7282 @@ -69,15 +72,17 @@ static unsigned long shared_align_offset(struct file *filp, unsigned long pgoff)
7283 }
7284
7285 static unsigned long get_shared_area(struct file *filp, unsigned long addr,
7286 - unsigned long len, unsigned long pgoff)
7287 + unsigned long len, unsigned long pgoff, unsigned long flags)
7288 {
7289 struct vm_unmapped_area_info info;
7290 + unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
7291
7292 info.flags = 0;
7293 info.length = len;
7294 info.low_limit = PAGE_ALIGN(addr);
7295 info.high_limit = TASK_SIZE;
7296 info.align_mask = PAGE_MASK & (SHMLBA - 1);
7297 + info.threadstack_offset = offset;
7298 info.align_offset = shared_align_offset(filp, pgoff);
7299 return vm_unmapped_area(&info);
7300 }
7301 @@ -93,13 +98,20 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7302 return -EINVAL;
7303 return addr;
7304 }
7305 - if (!addr)
7306 + if (!addr) {
7307 addr = TASK_UNMAPPED_BASE;
7308
7309 +#ifdef CONFIG_PAX_RANDMMAP
7310 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
7311 + addr += current->mm->delta_mmap;
7312 +#endif
7313 +
7314 + }
7315 +
7316 if (filp || (flags & MAP_SHARED))
7317 - addr = get_shared_area(filp, addr, len, pgoff);
7318 + addr = get_shared_area(filp, addr, len, pgoff, flags);
7319 else
7320 - addr = get_unshared_area(addr, len);
7321 + addr = get_unshared_area(addr, len, flags);
7322
7323 return addr;
7324 }
7325 diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
7326 index 1cd1d0c..44ec918 100644
7327 --- a/arch/parisc/kernel/traps.c
7328 +++ b/arch/parisc/kernel/traps.c
7329 @@ -722,9 +722,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
7330
7331 down_read(&current->mm->mmap_sem);
7332 vma = find_vma(current->mm,regs->iaoq[0]);
7333 - if (vma && (regs->iaoq[0] >= vma->vm_start)
7334 - && (vma->vm_flags & VM_EXEC)) {
7335 -
7336 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
7337 fault_address = regs->iaoq[0];
7338 fault_space = regs->iasq[0];
7339
7340 diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
7341 index 0293588..3b229aa 100644
7342 --- a/arch/parisc/mm/fault.c
7343 +++ b/arch/parisc/mm/fault.c
7344 @@ -15,6 +15,7 @@
7345 #include <linux/sched.h>
7346 #include <linux/interrupt.h>
7347 #include <linux/module.h>
7348 +#include <linux/unistd.h>
7349
7350 #include <asm/uaccess.h>
7351 #include <asm/traps.h>
7352 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
7353 static unsigned long
7354 parisc_acctyp(unsigned long code, unsigned int inst)
7355 {
7356 - if (code == 6 || code == 16)
7357 + if (code == 6 || code == 7 || code == 16)
7358 return VM_EXEC;
7359
7360 switch (inst & 0xf0000000) {
7361 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
7362 }
7363 #endif
7364
7365 +#ifdef CONFIG_PAX_PAGEEXEC
7366 +/*
7367 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
7368 + *
7369 + * returns 1 when task should be killed
7370 + * 2 when rt_sigreturn trampoline was detected
7371 + * 3 when unpatched PLT trampoline was detected
7372 + */
7373 +static int pax_handle_fetch_fault(struct pt_regs *regs)
7374 +{
7375 +
7376 +#ifdef CONFIG_PAX_EMUPLT
7377 + int err;
7378 +
7379 + do { /* PaX: unpatched PLT emulation */
7380 + unsigned int bl, depwi;
7381 +
7382 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
7383 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
7384 +
7385 + if (err)
7386 + break;
7387 +
7388 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
7389 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
7390 +
7391 + err = get_user(ldw, (unsigned int *)addr);
7392 + err |= get_user(bv, (unsigned int *)(addr+4));
7393 + err |= get_user(ldw2, (unsigned int *)(addr+8));
7394 +
7395 + if (err)
7396 + break;
7397 +
7398 + if (ldw == 0x0E801096U &&
7399 + bv == 0xEAC0C000U &&
7400 + ldw2 == 0x0E881095U)
7401 + {
7402 + unsigned int resolver, map;
7403 +
7404 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
7405 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
7406 + if (err)
7407 + break;
7408 +
7409 + regs->gr[20] = instruction_pointer(regs)+8;
7410 + regs->gr[21] = map;
7411 + regs->gr[22] = resolver;
7412 + regs->iaoq[0] = resolver | 3UL;
7413 + regs->iaoq[1] = regs->iaoq[0] + 4;
7414 + return 3;
7415 + }
7416 + }
7417 + } while (0);
7418 +#endif
7419 +
7420 +#ifdef CONFIG_PAX_EMUTRAMP
7421 +
7422 +#ifndef CONFIG_PAX_EMUSIGRT
7423 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
7424 + return 1;
7425 +#endif
7426 +
7427 + do { /* PaX: rt_sigreturn emulation */
7428 + unsigned int ldi1, ldi2, bel, nop;
7429 +
7430 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
7431 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
7432 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
7433 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
7434 +
7435 + if (err)
7436 + break;
7437 +
7438 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
7439 + ldi2 == 0x3414015AU &&
7440 + bel == 0xE4008200U &&
7441 + nop == 0x08000240U)
7442 + {
7443 + regs->gr[25] = (ldi1 & 2) >> 1;
7444 + regs->gr[20] = __NR_rt_sigreturn;
7445 + regs->gr[31] = regs->iaoq[1] + 16;
7446 + regs->sr[0] = regs->iasq[1];
7447 + regs->iaoq[0] = 0x100UL;
7448 + regs->iaoq[1] = regs->iaoq[0] + 4;
7449 + regs->iasq[0] = regs->sr[2];
7450 + regs->iasq[1] = regs->sr[2];
7451 + return 2;
7452 + }
7453 + } while (0);
7454 +#endif
7455 +
7456 + return 1;
7457 +}
7458 +
7459 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7460 +{
7461 + unsigned long i;
7462 +
7463 + printk(KERN_ERR "PAX: bytes at PC: ");
7464 + for (i = 0; i < 5; i++) {
7465 + unsigned int c;
7466 + if (get_user(c, (unsigned int *)pc+i))
7467 + printk(KERN_CONT "???????? ");
7468 + else
7469 + printk(KERN_CONT "%08x ", c);
7470 + }
7471 + printk("\n");
7472 +}
7473 +#endif
7474 +
7475 int fixup_exception(struct pt_regs *regs)
7476 {
7477 const struct exception_table_entry *fix;
7478 @@ -204,8 +315,33 @@ retry:
7479
7480 good_area:
7481
7482 - if ((vma->vm_flags & acc_type) != acc_type)
7483 + if ((vma->vm_flags & acc_type) != acc_type) {
7484 +
7485 +#ifdef CONFIG_PAX_PAGEEXEC
7486 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
7487 + (address & ~3UL) == instruction_pointer(regs))
7488 + {
7489 + up_read(&mm->mmap_sem);
7490 + switch (pax_handle_fetch_fault(regs)) {
7491 +
7492 +#ifdef CONFIG_PAX_EMUPLT
7493 + case 3:
7494 + return;
7495 +#endif
7496 +
7497 +#ifdef CONFIG_PAX_EMUTRAMP
7498 + case 2:
7499 + return;
7500 +#endif
7501 +
7502 + }
7503 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
7504 + do_group_exit(SIGKILL);
7505 + }
7506 +#endif
7507 +
7508 goto bad_area;
7509 + }
7510
7511 /*
7512 * If for any reason at all we couldn't handle the fault, make
7513 diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
7514 index 38f3b7e..7e485c0 100644
7515 --- a/arch/powerpc/Kconfig
7516 +++ b/arch/powerpc/Kconfig
7517 @@ -378,6 +378,7 @@ config ARCH_ENABLE_MEMORY_HOTREMOVE
7518 config KEXEC
7519 bool "kexec system call"
7520 depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
7521 + depends on !GRKERNSEC_KMEM
7522 help
7523 kexec is a system call that implements the ability to shutdown your
7524 current kernel, and to start another kernel. It is like a reboot
7525 diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
7526 index e3b1d41..8e81edf 100644
7527 --- a/arch/powerpc/include/asm/atomic.h
7528 +++ b/arch/powerpc/include/asm/atomic.h
7529 @@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
7530 return t1;
7531 }
7532
7533 +#define atomic64_read_unchecked(v) atomic64_read(v)
7534 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7535 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7536 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7537 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7538 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
7539 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7540 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
7541 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7542 +
7543 #endif /* __powerpc64__ */
7544
7545 #endif /* __KERNEL__ */
7546 diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
7547 index 9e495c9..b6878e5 100644
7548 --- a/arch/powerpc/include/asm/cache.h
7549 +++ b/arch/powerpc/include/asm/cache.h
7550 @@ -3,6 +3,7 @@
7551
7552 #ifdef __KERNEL__
7553
7554 +#include <linux/const.h>
7555
7556 /* bytes per L1 cache line */
7557 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
7558 @@ -22,7 +23,7 @@
7559 #define L1_CACHE_SHIFT 7
7560 #endif
7561
7562 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7563 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7564
7565 #define SMP_CACHE_BYTES L1_CACHE_BYTES
7566
7567 diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
7568 index cc0655a..13eac2e 100644
7569 --- a/arch/powerpc/include/asm/elf.h
7570 +++ b/arch/powerpc/include/asm/elf.h
7571 @@ -28,8 +28,19 @@
7572 the loader. We need to make sure that it is out of the way of the program
7573 that it will "exec", and that there is sufficient room for the brk. */
7574
7575 -extern unsigned long randomize_et_dyn(unsigned long base);
7576 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
7577 +#define ELF_ET_DYN_BASE (0x20000000)
7578 +
7579 +#ifdef CONFIG_PAX_ASLR
7580 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
7581 +
7582 +#ifdef __powerpc64__
7583 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
7584 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
7585 +#else
7586 +#define PAX_DELTA_MMAP_LEN 15
7587 +#define PAX_DELTA_STACK_LEN 15
7588 +#endif
7589 +#endif
7590
7591 /*
7592 * Our registers are always unsigned longs, whether we're a 32 bit
7593 @@ -123,10 +134,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
7594 (0x7ff >> (PAGE_SHIFT - 12)) : \
7595 (0x3ffff >> (PAGE_SHIFT - 12)))
7596
7597 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7598 -#define arch_randomize_brk arch_randomize_brk
7599 -
7600 -
7601 #ifdef CONFIG_SPU_BASE
7602 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
7603 #define NT_SPU 1
7604 diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
7605 index 8196e9c..d83a9f3 100644
7606 --- a/arch/powerpc/include/asm/exec.h
7607 +++ b/arch/powerpc/include/asm/exec.h
7608 @@ -4,6 +4,6 @@
7609 #ifndef _ASM_POWERPC_EXEC_H
7610 #define _ASM_POWERPC_EXEC_H
7611
7612 -extern unsigned long arch_align_stack(unsigned long sp);
7613 +#define arch_align_stack(x) ((x) & ~0xfUL)
7614
7615 #endif /* _ASM_POWERPC_EXEC_H */
7616 diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
7617 index 5acabbd..7ea14fa 100644
7618 --- a/arch/powerpc/include/asm/kmap_types.h
7619 +++ b/arch/powerpc/include/asm/kmap_types.h
7620 @@ -10,7 +10,7 @@
7621 * 2 of the License, or (at your option) any later version.
7622 */
7623
7624 -#define KM_TYPE_NR 16
7625 +#define KM_TYPE_NR 17
7626
7627 #endif /* __KERNEL__ */
7628 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
7629 diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
7630 index 8565c25..2865190 100644
7631 --- a/arch/powerpc/include/asm/mman.h
7632 +++ b/arch/powerpc/include/asm/mman.h
7633 @@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
7634 }
7635 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
7636
7637 -static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
7638 +static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
7639 {
7640 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
7641 }
7642 diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
7643 index b9f4262..dcf04f7 100644
7644 --- a/arch/powerpc/include/asm/page.h
7645 +++ b/arch/powerpc/include/asm/page.h
7646 @@ -230,8 +230,9 @@ extern long long virt_phys_offset;
7647 * and needs to be executable. This means the whole heap ends
7648 * up being executable.
7649 */
7650 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
7651 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7652 +#define VM_DATA_DEFAULT_FLAGS32 \
7653 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
7654 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7655
7656 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
7657 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7658 @@ -259,6 +260,9 @@ extern long long virt_phys_offset;
7659 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
7660 #endif
7661
7662 +#define ktla_ktva(addr) (addr)
7663 +#define ktva_ktla(addr) (addr)
7664 +
7665 #ifndef CONFIG_PPC_BOOK3S_64
7666 /*
7667 * Use the top bit of the higher-level page table entries to indicate whether
7668 diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
7669 index 88693ce..ac6f9ab 100644
7670 --- a/arch/powerpc/include/asm/page_64.h
7671 +++ b/arch/powerpc/include/asm/page_64.h
7672 @@ -153,15 +153,18 @@ do { \
7673 * stack by default, so in the absence of a PT_GNU_STACK program header
7674 * we turn execute permission off.
7675 */
7676 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
7677 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7678 +#define VM_STACK_DEFAULT_FLAGS32 \
7679 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
7680 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7681
7682 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
7683 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
7684
7685 +#ifndef CONFIG_PAX_PAGEEXEC
7686 #define VM_STACK_DEFAULT_FLAGS \
7687 (is_32bit_task() ? \
7688 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
7689 +#endif
7690
7691 #include <asm-generic/getorder.h>
7692
7693 diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
7694 index 256d6f8..b0166a7 100644
7695 --- a/arch/powerpc/include/asm/pgalloc-64.h
7696 +++ b/arch/powerpc/include/asm/pgalloc-64.h
7697 @@ -53,6 +53,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
7698 #ifndef CONFIG_PPC_64K_PAGES
7699
7700 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
7701 +#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
7702
7703 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
7704 {
7705 @@ -70,6 +71,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7706 pud_set(pud, (unsigned long)pmd);
7707 }
7708
7709 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
7710 +{
7711 + pud_populate(mm, pud, pmd);
7712 +}
7713 +
7714 #define pmd_populate(mm, pmd, pte_page) \
7715 pmd_populate_kernel(mm, pmd, page_address(pte_page))
7716 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
7717 @@ -169,6 +175,7 @@ extern void __tlb_remove_table(void *_table);
7718 #endif
7719
7720 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
7721 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
7722
7723 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
7724 pte_t *pte)
7725 diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
7726 index 7d6eacf..14c0240 100644
7727 --- a/arch/powerpc/include/asm/pgtable.h
7728 +++ b/arch/powerpc/include/asm/pgtable.h
7729 @@ -2,6 +2,7 @@
7730 #define _ASM_POWERPC_PGTABLE_H
7731 #ifdef __KERNEL__
7732
7733 +#include <linux/const.h>
7734 #ifndef __ASSEMBLY__
7735 #include <asm/processor.h> /* For TASK_SIZE */
7736 #include <asm/mmu.h>
7737 diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
7738 index 4aad413..85d86bf 100644
7739 --- a/arch/powerpc/include/asm/pte-hash32.h
7740 +++ b/arch/powerpc/include/asm/pte-hash32.h
7741 @@ -21,6 +21,7 @@
7742 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
7743 #define _PAGE_USER 0x004 /* usermode access allowed */
7744 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
7745 +#define _PAGE_EXEC _PAGE_GUARDED
7746 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
7747 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
7748 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
7749 diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
7750 index 10d1ef0..8f83abc 100644
7751 --- a/arch/powerpc/include/asm/reg.h
7752 +++ b/arch/powerpc/include/asm/reg.h
7753 @@ -234,6 +234,7 @@
7754 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
7755 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
7756 #define DSISR_NOHPTE 0x40000000 /* no translation found */
7757 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
7758 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
7759 #define DSISR_ISSTORE 0x02000000 /* access was a store */
7760 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
7761 diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
7762 index 98da78e..dc68271 100644
7763 --- a/arch/powerpc/include/asm/smp.h
7764 +++ b/arch/powerpc/include/asm/smp.h
7765 @@ -50,7 +50,7 @@ struct smp_ops_t {
7766 int (*cpu_disable)(void);
7767 void (*cpu_die)(unsigned int nr);
7768 int (*cpu_bootable)(unsigned int nr);
7769 -};
7770 +} __no_const;
7771
7772 extern void smp_send_debugger_break(void);
7773 extern void start_secondary_resume(void);
7774 diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
7775 index ba7b197..d292e26 100644
7776 --- a/arch/powerpc/include/asm/thread_info.h
7777 +++ b/arch/powerpc/include/asm/thread_info.h
7778 @@ -93,7 +93,6 @@ static inline struct thread_info *current_thread_info(void)
7779 #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
7780 TIF_NEED_RESCHED */
7781 #define TIF_32BIT 4 /* 32 bit binary */
7782 -#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
7783 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
7784 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
7785 #define TIF_SINGLESTEP 8 /* singlestepping active */
7786 @@ -107,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
7787 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
7788 for stack store? */
7789 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
7790 +#define TIF_PERFMON_WORK 18 /* work for pfm_handle_work() */
7791 +/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
7792 +#define TIF_GRSEC_SETXID 5 /* update credentials on syscall entry/exit */
7793
7794 /* as above, but as bit values */
7795 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
7796 @@ -126,9 +128,10 @@ static inline struct thread_info *current_thread_info(void)
7797 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
7798 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
7799 #define _TIF_NOHZ (1<<TIF_NOHZ)
7800 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
7801 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
7802 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
7803 - _TIF_NOHZ)
7804 + _TIF_NOHZ | _TIF_GRSEC_SETXID)
7805
7806 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
7807 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
7808 diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
7809 index 9485b43..4718d50 100644
7810 --- a/arch/powerpc/include/asm/uaccess.h
7811 +++ b/arch/powerpc/include/asm/uaccess.h
7812 @@ -318,52 +318,6 @@ do { \
7813 extern unsigned long __copy_tofrom_user(void __user *to,
7814 const void __user *from, unsigned long size);
7815
7816 -#ifndef __powerpc64__
7817 -
7818 -static inline unsigned long copy_from_user(void *to,
7819 - const void __user *from, unsigned long n)
7820 -{
7821 - unsigned long over;
7822 -
7823 - if (access_ok(VERIFY_READ, from, n))
7824 - return __copy_tofrom_user((__force void __user *)to, from, n);
7825 - if ((unsigned long)from < TASK_SIZE) {
7826 - over = (unsigned long)from + n - TASK_SIZE;
7827 - return __copy_tofrom_user((__force void __user *)to, from,
7828 - n - over) + over;
7829 - }
7830 - return n;
7831 -}
7832 -
7833 -static inline unsigned long copy_to_user(void __user *to,
7834 - const void *from, unsigned long n)
7835 -{
7836 - unsigned long over;
7837 -
7838 - if (access_ok(VERIFY_WRITE, to, n))
7839 - return __copy_tofrom_user(to, (__force void __user *)from, n);
7840 - if ((unsigned long)to < TASK_SIZE) {
7841 - over = (unsigned long)to + n - TASK_SIZE;
7842 - return __copy_tofrom_user(to, (__force void __user *)from,
7843 - n - over) + over;
7844 - }
7845 - return n;
7846 -}
7847 -
7848 -#else /* __powerpc64__ */
7849 -
7850 -#define __copy_in_user(to, from, size) \
7851 - __copy_tofrom_user((to), (from), (size))
7852 -
7853 -extern unsigned long copy_from_user(void *to, const void __user *from,
7854 - unsigned long n);
7855 -extern unsigned long copy_to_user(void __user *to, const void *from,
7856 - unsigned long n);
7857 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
7858 - unsigned long n);
7859 -
7860 -#endif /* __powerpc64__ */
7861 -
7862 static inline unsigned long __copy_from_user_inatomic(void *to,
7863 const void __user *from, unsigned long n)
7864 {
7865 @@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
7866 if (ret == 0)
7867 return 0;
7868 }
7869 +
7870 + if (!__builtin_constant_p(n))
7871 + check_object_size(to, n, false);
7872 +
7873 return __copy_tofrom_user((__force void __user *)to, from, n);
7874 }
7875
7876 @@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
7877 if (ret == 0)
7878 return 0;
7879 }
7880 +
7881 + if (!__builtin_constant_p(n))
7882 + check_object_size(from, n, true);
7883 +
7884 return __copy_tofrom_user(to, (__force const void __user *)from, n);
7885 }
7886
7887 @@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
7888 return __copy_to_user_inatomic(to, from, size);
7889 }
7890
7891 +#ifndef __powerpc64__
7892 +
7893 +static inline unsigned long __must_check copy_from_user(void *to,
7894 + const void __user *from, unsigned long n)
7895 +{
7896 + unsigned long over;
7897 +
7898 + if ((long)n < 0)
7899 + return n;
7900 +
7901 + if (access_ok(VERIFY_READ, from, n)) {
7902 + if (!__builtin_constant_p(n))
7903 + check_object_size(to, n, false);
7904 + return __copy_tofrom_user((__force void __user *)to, from, n);
7905 + }
7906 + if ((unsigned long)from < TASK_SIZE) {
7907 + over = (unsigned long)from + n - TASK_SIZE;
7908 + if (!__builtin_constant_p(n - over))
7909 + check_object_size(to, n - over, false);
7910 + return __copy_tofrom_user((__force void __user *)to, from,
7911 + n - over) + over;
7912 + }
7913 + return n;
7914 +}
7915 +
7916 +static inline unsigned long __must_check copy_to_user(void __user *to,
7917 + const void *from, unsigned long n)
7918 +{
7919 + unsigned long over;
7920 +
7921 + if ((long)n < 0)
7922 + return n;
7923 +
7924 + if (access_ok(VERIFY_WRITE, to, n)) {
7925 + if (!__builtin_constant_p(n))
7926 + check_object_size(from, n, true);
7927 + return __copy_tofrom_user(to, (__force void __user *)from, n);
7928 + }
7929 + if ((unsigned long)to < TASK_SIZE) {
7930 + over = (unsigned long)to + n - TASK_SIZE;
7931 + if (!__builtin_constant_p(n))
7932 + check_object_size(from, n - over, true);
7933 + return __copy_tofrom_user(to, (__force void __user *)from,
7934 + n - over) + over;
7935 + }
7936 + return n;
7937 +}
7938 +
7939 +#else /* __powerpc64__ */
7940 +
7941 +#define __copy_in_user(to, from, size) \
7942 + __copy_tofrom_user((to), (from), (size))
7943 +
7944 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
7945 +{
7946 + if ((long)n < 0 || n > INT_MAX)
7947 + return n;
7948 +
7949 + if (!__builtin_constant_p(n))
7950 + check_object_size(to, n, false);
7951 +
7952 + if (likely(access_ok(VERIFY_READ, from, n)))
7953 + n = __copy_from_user(to, from, n);
7954 + else
7955 + memset(to, 0, n);
7956 + return n;
7957 +}
7958 +
7959 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
7960 +{
7961 + if ((long)n < 0 || n > INT_MAX)
7962 + return n;
7963 +
7964 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
7965 + if (!__builtin_constant_p(n))
7966 + check_object_size(from, n, true);
7967 + n = __copy_to_user(to, from, n);
7968 + }
7969 + return n;
7970 +}
7971 +
7972 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
7973 + unsigned long n);
7974 +
7975 +#endif /* __powerpc64__ */
7976 +
7977 extern unsigned long __clear_user(void __user *addr, unsigned long size);
7978
7979 static inline unsigned long clear_user(void __user *addr, unsigned long size)
7980 diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
7981 index 2d06704..1616f1b 100644
7982 --- a/arch/powerpc/kernel/exceptions-64e.S
7983 +++ b/arch/powerpc/kernel/exceptions-64e.S
7984 @@ -757,6 +757,7 @@ storage_fault_common:
7985 std r14,_DAR(r1)
7986 std r15,_DSISR(r1)
7987 addi r3,r1,STACK_FRAME_OVERHEAD
7988 + bl .save_nvgprs
7989 mr r4,r14
7990 mr r5,r15
7991 ld r14,PACA_EXGEN+EX_R14(r13)
7992 @@ -765,8 +766,7 @@ storage_fault_common:
7993 cmpdi r3,0
7994 bne- 1f
7995 b .ret_from_except_lite
7996 -1: bl .save_nvgprs
7997 - mr r5,r3
7998 +1: mr r5,r3
7999 addi r3,r1,STACK_FRAME_OVERHEAD
8000 ld r4,_DAR(r1)
8001 bl .bad_page_fault
8002 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
8003 index 3a9ed6a..b534681 100644
8004 --- a/arch/powerpc/kernel/exceptions-64s.S
8005 +++ b/arch/powerpc/kernel/exceptions-64s.S
8006 @@ -1364,10 +1364,10 @@ handle_page_fault:
8007 11: ld r4,_DAR(r1)
8008 ld r5,_DSISR(r1)
8009 addi r3,r1,STACK_FRAME_OVERHEAD
8010 + bl .save_nvgprs
8011 bl .do_page_fault
8012 cmpdi r3,0
8013 beq+ 12f
8014 - bl .save_nvgprs
8015 mr r5,r3
8016 addi r3,r1,STACK_FRAME_OVERHEAD
8017 lwz r4,_DAR(r1)
8018 diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
8019 index 2e3200c..72095ce 100644
8020 --- a/arch/powerpc/kernel/module_32.c
8021 +++ b/arch/powerpc/kernel/module_32.c
8022 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
8023 me->arch.core_plt_section = i;
8024 }
8025 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
8026 - printk("Module doesn't contain .plt or .init.plt sections.\n");
8027 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
8028 return -ENOEXEC;
8029 }
8030
8031 @@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
8032
8033 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
8034 /* Init, or core PLT? */
8035 - if (location >= mod->module_core
8036 - && location < mod->module_core + mod->core_size)
8037 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
8038 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
8039 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
8040 - else
8041 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
8042 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
8043 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
8044 + else {
8045 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
8046 + return ~0UL;
8047 + }
8048
8049 /* Find this entry, or if that fails, the next avail. entry */
8050 while (entry->jump[0]) {
8051 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
8052 index 96d2fdf..f6d10c8 100644
8053 --- a/arch/powerpc/kernel/process.c
8054 +++ b/arch/powerpc/kernel/process.c
8055 @@ -886,8 +886,8 @@ void show_regs(struct pt_regs * regs)
8056 * Lookup NIP late so we have the best change of getting the
8057 * above info out without failing
8058 */
8059 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
8060 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
8061 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
8062 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
8063 #endif
8064 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
8065 printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
8066 @@ -1352,10 +1352,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8067 newsp = stack[0];
8068 ip = stack[STACK_FRAME_LR_SAVE];
8069 if (!firstframe || ip != lr) {
8070 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
8071 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
8072 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8073 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
8074 - printk(" (%pS)",
8075 + printk(" (%pA)",
8076 (void *)current->ret_stack[curr_frame].ret);
8077 curr_frame--;
8078 }
8079 @@ -1375,7 +1375,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
8080 struct pt_regs *regs = (struct pt_regs *)
8081 (sp + STACK_FRAME_OVERHEAD);
8082 lr = regs->link;
8083 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
8084 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
8085 regs->trap, (void *)regs->nip, (void *)lr);
8086 firstframe = 1;
8087 }
8088 @@ -1411,58 +1411,3 @@ void notrace __ppc64_runlatch_off(void)
8089 mtspr(SPRN_CTRLT, ctrl);
8090 }
8091 #endif /* CONFIG_PPC64 */
8092 -
8093 -unsigned long arch_align_stack(unsigned long sp)
8094 -{
8095 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8096 - sp -= get_random_int() & ~PAGE_MASK;
8097 - return sp & ~0xf;
8098 -}
8099 -
8100 -static inline unsigned long brk_rnd(void)
8101 -{
8102 - unsigned long rnd = 0;
8103 -
8104 - /* 8MB for 32bit, 1GB for 64bit */
8105 - if (is_32bit_task())
8106 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
8107 - else
8108 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
8109 -
8110 - return rnd << PAGE_SHIFT;
8111 -}
8112 -
8113 -unsigned long arch_randomize_brk(struct mm_struct *mm)
8114 -{
8115 - unsigned long base = mm->brk;
8116 - unsigned long ret;
8117 -
8118 -#ifdef CONFIG_PPC_STD_MMU_64
8119 - /*
8120 - * If we are using 1TB segments and we are allowed to randomise
8121 - * the heap, we can put it above 1TB so it is backed by a 1TB
8122 - * segment. Otherwise the heap will be in the bottom 1TB
8123 - * which always uses 256MB segments and this may result in a
8124 - * performance penalty.
8125 - */
8126 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
8127 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
8128 -#endif
8129 -
8130 - ret = PAGE_ALIGN(base + brk_rnd());
8131 -
8132 - if (ret < mm->brk)
8133 - return mm->brk;
8134 -
8135 - return ret;
8136 -}
8137 -
8138 -unsigned long randomize_et_dyn(unsigned long base)
8139 -{
8140 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
8141 -
8142 - if (ret < base)
8143 - return base;
8144 -
8145 - return ret;
8146 -}
8147 diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
8148 index 9a0d24c..e7fbedf 100644
8149 --- a/arch/powerpc/kernel/ptrace.c
8150 +++ b/arch/powerpc/kernel/ptrace.c
8151 @@ -1761,6 +1761,10 @@ long arch_ptrace(struct task_struct *child, long request,
8152 return ret;
8153 }
8154
8155 +#ifdef CONFIG_GRKERNSEC_SETXID
8156 +extern void gr_delayed_cred_worker(void);
8157 +#endif
8158 +
8159 /*
8160 * We must return the syscall number to actually look up in the table.
8161 * This can be -1L to skip running any syscall at all.
8162 @@ -1773,6 +1777,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
8163
8164 secure_computing_strict(regs->gpr[0]);
8165
8166 +#ifdef CONFIG_GRKERNSEC_SETXID
8167 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8168 + gr_delayed_cred_worker();
8169 +#endif
8170 +
8171 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
8172 tracehook_report_syscall_entry(regs))
8173 /*
8174 @@ -1807,6 +1816,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
8175 {
8176 int step;
8177
8178 +#ifdef CONFIG_GRKERNSEC_SETXID
8179 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8180 + gr_delayed_cred_worker();
8181 +#endif
8182 +
8183 audit_syscall_exit(regs);
8184
8185 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8186 diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
8187 index fea2dba..a779f6b 100644
8188 --- a/arch/powerpc/kernel/signal_32.c
8189 +++ b/arch/powerpc/kernel/signal_32.c
8190 @@ -1002,7 +1002,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
8191 /* Save user registers on the stack */
8192 frame = &rt_sf->uc.uc_mcontext;
8193 addr = frame;
8194 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
8195 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
8196 sigret = 0;
8197 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
8198 } else {
8199 diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
8200 index 1e7ba88..17afb1b 100644
8201 --- a/arch/powerpc/kernel/signal_64.c
8202 +++ b/arch/powerpc/kernel/signal_64.c
8203 @@ -763,7 +763,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
8204 #endif
8205
8206 /* Set up to return from userspace. */
8207 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
8208 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
8209 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
8210 } else {
8211 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
8212 diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
8213 index f783c93..619baf1 100644
8214 --- a/arch/powerpc/kernel/traps.c
8215 +++ b/arch/powerpc/kernel/traps.c
8216 @@ -142,6 +142,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
8217 return flags;
8218 }
8219
8220 +extern void gr_handle_kernel_exploit(void);
8221 +
8222 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8223 int signr)
8224 {
8225 @@ -191,6 +193,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
8226 panic("Fatal exception in interrupt");
8227 if (panic_on_oops)
8228 panic("Fatal exception");
8229 +
8230 + gr_handle_kernel_exploit();
8231 +
8232 do_exit(signr);
8233 }
8234
8235 diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
8236 index 1d9c926..25f4741 100644
8237 --- a/arch/powerpc/kernel/vdso.c
8238 +++ b/arch/powerpc/kernel/vdso.c
8239 @@ -34,6 +34,7 @@
8240 #include <asm/firmware.h>
8241 #include <asm/vdso.h>
8242 #include <asm/vdso_datapage.h>
8243 +#include <asm/mman.h>
8244
8245 #include "setup.h"
8246
8247 @@ -222,7 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8248 vdso_base = VDSO32_MBASE;
8249 #endif
8250
8251 - current->mm->context.vdso_base = 0;
8252 + current->mm->context.vdso_base = ~0UL;
8253
8254 /* vDSO has a problem and was disabled, just don't "enable" it for the
8255 * process
8256 @@ -242,7 +243,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
8257 vdso_base = get_unmapped_area(NULL, vdso_base,
8258 (vdso_pages << PAGE_SHIFT) +
8259 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
8260 - 0, 0);
8261 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
8262 if (IS_ERR_VALUE(vdso_base)) {
8263 rc = vdso_base;
8264 goto fail_mmapsem;
8265 diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
8266 index 5eea6f3..5d10396 100644
8267 --- a/arch/powerpc/lib/usercopy_64.c
8268 +++ b/arch/powerpc/lib/usercopy_64.c
8269 @@ -9,22 +9,6 @@
8270 #include <linux/module.h>
8271 #include <asm/uaccess.h>
8272
8273 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8274 -{
8275 - if (likely(access_ok(VERIFY_READ, from, n)))
8276 - n = __copy_from_user(to, from, n);
8277 - else
8278 - memset(to, 0, n);
8279 - return n;
8280 -}
8281 -
8282 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8283 -{
8284 - if (likely(access_ok(VERIFY_WRITE, to, n)))
8285 - n = __copy_to_user(to, from, n);
8286 - return n;
8287 -}
8288 -
8289 unsigned long copy_in_user(void __user *to, const void __user *from,
8290 unsigned long n)
8291 {
8292 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
8293 return n;
8294 }
8295
8296 -EXPORT_SYMBOL(copy_from_user);
8297 -EXPORT_SYMBOL(copy_to_user);
8298 EXPORT_SYMBOL(copy_in_user);
8299
8300 diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
8301 index 51ab9e7..7d3c78b 100644
8302 --- a/arch/powerpc/mm/fault.c
8303 +++ b/arch/powerpc/mm/fault.c
8304 @@ -33,6 +33,10 @@
8305 #include <linux/magic.h>
8306 #include <linux/ratelimit.h>
8307 #include <linux/context_tracking.h>
8308 +#include <linux/slab.h>
8309 +#include <linux/pagemap.h>
8310 +#include <linux/compiler.h>
8311 +#include <linux/unistd.h>
8312
8313 #include <asm/firmware.h>
8314 #include <asm/page.h>
8315 @@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
8316 }
8317 #endif
8318
8319 +#ifdef CONFIG_PAX_PAGEEXEC
8320 +/*
8321 + * PaX: decide what to do with offenders (regs->nip = fault address)
8322 + *
8323 + * returns 1 when task should be killed
8324 + */
8325 +static int pax_handle_fetch_fault(struct pt_regs *regs)
8326 +{
8327 + return 1;
8328 +}
8329 +
8330 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
8331 +{
8332 + unsigned long i;
8333 +
8334 + printk(KERN_ERR "PAX: bytes at PC: ");
8335 + for (i = 0; i < 5; i++) {
8336 + unsigned int c;
8337 + if (get_user(c, (unsigned int __user *)pc+i))
8338 + printk(KERN_CONT "???????? ");
8339 + else
8340 + printk(KERN_CONT "%08x ", c);
8341 + }
8342 + printk("\n");
8343 +}
8344 +#endif
8345 +
8346 /*
8347 * Check whether the instruction at regs->nip is a store using
8348 * an update addressing form which will update r1.
8349 @@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
8350 * indicate errors in DSISR but can validly be set in SRR1.
8351 */
8352 if (trap == 0x400)
8353 - error_code &= 0x48200000;
8354 + error_code &= 0x58200000;
8355 else
8356 is_write = error_code & DSISR_ISSTORE;
8357 #else
8358 @@ -378,7 +409,7 @@ good_area:
8359 * "undefined". Of those that can be set, this is the only
8360 * one which seems bad.
8361 */
8362 - if (error_code & 0x10000000)
8363 + if (error_code & DSISR_GUARDED)
8364 /* Guarded storage error. */
8365 goto bad_area;
8366 #endif /* CONFIG_8xx */
8367 @@ -393,7 +424,7 @@ good_area:
8368 * processors use the same I/D cache coherency mechanism
8369 * as embedded.
8370 */
8371 - if (error_code & DSISR_PROTFAULT)
8372 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
8373 goto bad_area;
8374 #endif /* CONFIG_PPC_STD_MMU */
8375
8376 @@ -483,6 +514,23 @@ bad_area:
8377 bad_area_nosemaphore:
8378 /* User mode accesses cause a SIGSEGV */
8379 if (user_mode(regs)) {
8380 +
8381 +#ifdef CONFIG_PAX_PAGEEXEC
8382 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
8383 +#ifdef CONFIG_PPC_STD_MMU
8384 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
8385 +#else
8386 + if (is_exec && regs->nip == address) {
8387 +#endif
8388 + switch (pax_handle_fetch_fault(regs)) {
8389 + }
8390 +
8391 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
8392 + do_group_exit(SIGKILL);
8393 + }
8394 + }
8395 +#endif
8396 +
8397 _exception(SIGSEGV, regs, code, address);
8398 goto bail;
8399 }
8400 diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
8401 index cb8bdbe..d770680 100644
8402 --- a/arch/powerpc/mm/mmap.c
8403 +++ b/arch/powerpc/mm/mmap.c
8404 @@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
8405 {
8406 unsigned long rnd = 0;
8407
8408 +#ifdef CONFIG_PAX_RANDMMAP
8409 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8410 +#endif
8411 +
8412 if (current->flags & PF_RANDOMIZE) {
8413 /* 8MB for 32bit, 1GB for 64bit */
8414 if (is_32bit_task())
8415 @@ -91,9 +95,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8416 */
8417 if (mmap_is_legacy()) {
8418 mm->mmap_base = TASK_UNMAPPED_BASE;
8419 +
8420 +#ifdef CONFIG_PAX_RANDMMAP
8421 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8422 + mm->mmap_base += mm->delta_mmap;
8423 +#endif
8424 +
8425 mm->get_unmapped_area = arch_get_unmapped_area;
8426 } else {
8427 mm->mmap_base = mmap_base();
8428 +
8429 +#ifdef CONFIG_PAX_RANDMMAP
8430 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8431 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8432 +#endif
8433 +
8434 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8435 }
8436 }
8437 diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
8438 index 7ce9cf3..a964087 100644
8439 --- a/arch/powerpc/mm/slice.c
8440 +++ b/arch/powerpc/mm/slice.c
8441 @@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
8442 if ((mm->task_size - len) < addr)
8443 return 0;
8444 vma = find_vma(mm, addr);
8445 - return (!vma || (addr + len) <= vma->vm_start);
8446 + return check_heap_stack_gap(vma, addr, len, 0);
8447 }
8448
8449 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
8450 @@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
8451 info.align_offset = 0;
8452
8453 addr = TASK_UNMAPPED_BASE;
8454 +
8455 +#ifdef CONFIG_PAX_RANDMMAP
8456 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8457 + addr += mm->delta_mmap;
8458 +#endif
8459 +
8460 while (addr < TASK_SIZE) {
8461 info.low_limit = addr;
8462 if (!slice_scan_available(addr, available, 1, &addr))
8463 @@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
8464 if (fixed && addr > (mm->task_size - len))
8465 return -EINVAL;
8466
8467 +#ifdef CONFIG_PAX_RANDMMAP
8468 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
8469 + addr = 0;
8470 +#endif
8471 +
8472 /* If hint, make sure it matches our alignment restrictions */
8473 if (!fixed && addr) {
8474 addr = _ALIGN_UP(addr, 1ul << pshift);
8475 diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
8476 index 9098692..3d54cd1 100644
8477 --- a/arch/powerpc/platforms/cell/spufs/file.c
8478 +++ b/arch/powerpc/platforms/cell/spufs/file.c
8479 @@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
8480 return VM_FAULT_NOPAGE;
8481 }
8482
8483 -static int spufs_mem_mmap_access(struct vm_area_struct *vma,
8484 +static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
8485 unsigned long address,
8486 - void *buf, int len, int write)
8487 + void *buf, size_t len, int write)
8488 {
8489 struct spu_context *ctx = vma->vm_file->private_data;
8490 unsigned long offset = address - vma->vm_start;
8491 diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
8492 index c797832..ce575c8 100644
8493 --- a/arch/s390/include/asm/atomic.h
8494 +++ b/arch/s390/include/asm/atomic.h
8495 @@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
8496 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
8497 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
8498
8499 +#define atomic64_read_unchecked(v) atomic64_read(v)
8500 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
8501 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
8502 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
8503 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
8504 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
8505 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
8506 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
8507 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
8508 +
8509 #define smp_mb__before_atomic_dec() smp_mb()
8510 #define smp_mb__after_atomic_dec() smp_mb()
8511 #define smp_mb__before_atomic_inc() smp_mb()
8512 diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
8513 index 4d7ccac..d03d0ad 100644
8514 --- a/arch/s390/include/asm/cache.h
8515 +++ b/arch/s390/include/asm/cache.h
8516 @@ -9,8 +9,10 @@
8517 #ifndef __ARCH_S390_CACHE_H
8518 #define __ARCH_S390_CACHE_H
8519
8520 -#define L1_CACHE_BYTES 256
8521 +#include <linux/const.h>
8522 +
8523 #define L1_CACHE_SHIFT 8
8524 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8525 #define NET_SKB_PAD 32
8526
8527 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8528 diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
8529 index 78f4f87..598ce39 100644
8530 --- a/arch/s390/include/asm/elf.h
8531 +++ b/arch/s390/include/asm/elf.h
8532 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
8533 the loader. We need to make sure that it is out of the way of the program
8534 that it will "exec", and that there is sufficient room for the brk. */
8535
8536 -extern unsigned long randomize_et_dyn(unsigned long base);
8537 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
8538 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
8539 +
8540 +#ifdef CONFIG_PAX_ASLR
8541 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
8542 +
8543 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
8544 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
8545 +#endif
8546
8547 /* This yields a mask that user programs can use to figure out what
8548 instruction set this CPU supports. */
8549 @@ -222,9 +228,6 @@ struct linux_binprm;
8550 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
8551 int arch_setup_additional_pages(struct linux_binprm *, int);
8552
8553 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8554 -#define arch_randomize_brk arch_randomize_brk
8555 -
8556 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
8557
8558 #endif
8559 diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
8560 index c4a93d6..4d2a9b4 100644
8561 --- a/arch/s390/include/asm/exec.h
8562 +++ b/arch/s390/include/asm/exec.h
8563 @@ -7,6 +7,6 @@
8564 #ifndef __ASM_EXEC_H
8565 #define __ASM_EXEC_H
8566
8567 -extern unsigned long arch_align_stack(unsigned long sp);
8568 +#define arch_align_stack(x) ((x) & ~0xfUL)
8569
8570 #endif /* __ASM_EXEC_H */
8571 diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
8572 index 9c33ed4..e40cbef 100644
8573 --- a/arch/s390/include/asm/uaccess.h
8574 +++ b/arch/s390/include/asm/uaccess.h
8575 @@ -252,6 +252,10 @@ static inline unsigned long __must_check
8576 copy_to_user(void __user *to, const void *from, unsigned long n)
8577 {
8578 might_fault();
8579 +
8580 + if ((long)n < 0)
8581 + return n;
8582 +
8583 return __copy_to_user(to, from, n);
8584 }
8585
8586 @@ -275,6 +279,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
8587 static inline unsigned long __must_check
8588 __copy_from_user(void *to, const void __user *from, unsigned long n)
8589 {
8590 + if ((long)n < 0)
8591 + return n;
8592 +
8593 if (__builtin_constant_p(n) && (n <= 256))
8594 return uaccess.copy_from_user_small(n, from, to);
8595 else
8596 @@ -306,10 +313,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
8597 static inline unsigned long __must_check
8598 copy_from_user(void *to, const void __user *from, unsigned long n)
8599 {
8600 - unsigned int sz = __compiletime_object_size(to);
8601 + size_t sz = __compiletime_object_size(to);
8602
8603 might_fault();
8604 - if (unlikely(sz != -1 && sz < n)) {
8605 +
8606 + if ((long)n < 0)
8607 + return n;
8608 +
8609 + if (unlikely(sz != (size_t)-1 && sz < n)) {
8610 copy_from_user_overflow();
8611 return n;
8612 }
8613 diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
8614 index 7845e15..59c4353 100644
8615 --- a/arch/s390/kernel/module.c
8616 +++ b/arch/s390/kernel/module.c
8617 @@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
8618
8619 /* Increase core size by size of got & plt and set start
8620 offsets for got and plt. */
8621 - me->core_size = ALIGN(me->core_size, 4);
8622 - me->arch.got_offset = me->core_size;
8623 - me->core_size += me->arch.got_size;
8624 - me->arch.plt_offset = me->core_size;
8625 - me->core_size += me->arch.plt_size;
8626 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
8627 + me->arch.got_offset = me->core_size_rw;
8628 + me->core_size_rw += me->arch.got_size;
8629 + me->arch.plt_offset = me->core_size_rx;
8630 + me->core_size_rx += me->arch.plt_size;
8631 return 0;
8632 }
8633
8634 @@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8635 if (info->got_initialized == 0) {
8636 Elf_Addr *gotent;
8637
8638 - gotent = me->module_core + me->arch.got_offset +
8639 + gotent = me->module_core_rw + me->arch.got_offset +
8640 info->got_offset;
8641 *gotent = val;
8642 info->got_initialized = 1;
8643 @@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8644 rc = apply_rela_bits(loc, val, 0, 64, 0);
8645 else if (r_type == R_390_GOTENT ||
8646 r_type == R_390_GOTPLTENT) {
8647 - val += (Elf_Addr) me->module_core - loc;
8648 + val += (Elf_Addr) me->module_core_rw - loc;
8649 rc = apply_rela_bits(loc, val, 1, 32, 1);
8650 }
8651 break;
8652 @@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8653 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
8654 if (info->plt_initialized == 0) {
8655 unsigned int *ip;
8656 - ip = me->module_core + me->arch.plt_offset +
8657 + ip = me->module_core_rx + me->arch.plt_offset +
8658 info->plt_offset;
8659 #ifndef CONFIG_64BIT
8660 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
8661 @@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8662 val - loc + 0xffffUL < 0x1ffffeUL) ||
8663 (r_type == R_390_PLT32DBL &&
8664 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
8665 - val = (Elf_Addr) me->module_core +
8666 + val = (Elf_Addr) me->module_core_rx +
8667 me->arch.plt_offset +
8668 info->plt_offset;
8669 val += rela->r_addend - loc;
8670 @@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8671 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
8672 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
8673 val = val + rela->r_addend -
8674 - ((Elf_Addr) me->module_core + me->arch.got_offset);
8675 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
8676 if (r_type == R_390_GOTOFF16)
8677 rc = apply_rela_bits(loc, val, 0, 16, 0);
8678 else if (r_type == R_390_GOTOFF32)
8679 @@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
8680 break;
8681 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
8682 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
8683 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
8684 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
8685 rela->r_addend - loc;
8686 if (r_type == R_390_GOTPC)
8687 rc = apply_rela_bits(loc, val, 1, 32, 0);
8688 diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
8689 index c5dbb33..b41f4ee 100644
8690 --- a/arch/s390/kernel/process.c
8691 +++ b/arch/s390/kernel/process.c
8692 @@ -237,39 +237,3 @@ unsigned long get_wchan(struct task_struct *p)
8693 }
8694 return 0;
8695 }
8696 -
8697 -unsigned long arch_align_stack(unsigned long sp)
8698 -{
8699 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
8700 - sp -= get_random_int() & ~PAGE_MASK;
8701 - return sp & ~0xf;
8702 -}
8703 -
8704 -static inline unsigned long brk_rnd(void)
8705 -{
8706 - /* 8MB for 32bit, 1GB for 64bit */
8707 - if (is_32bit_task())
8708 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
8709 - else
8710 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
8711 -}
8712 -
8713 -unsigned long arch_randomize_brk(struct mm_struct *mm)
8714 -{
8715 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
8716 -
8717 - if (ret < mm->brk)
8718 - return mm->brk;
8719 - return ret;
8720 -}
8721 -
8722 -unsigned long randomize_et_dyn(unsigned long base)
8723 -{
8724 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
8725 -
8726 - if (!(current->flags & PF_RANDOMIZE))
8727 - return base;
8728 - if (ret < base)
8729 - return base;
8730 - return ret;
8731 -}
8732 diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
8733 index 4002329..99b67cb 100644
8734 --- a/arch/s390/mm/mmap.c
8735 +++ b/arch/s390/mm/mmap.c
8736 @@ -90,9 +90,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8737 */
8738 if (mmap_is_legacy()) {
8739 mm->mmap_base = TASK_UNMAPPED_BASE;
8740 +
8741 +#ifdef CONFIG_PAX_RANDMMAP
8742 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8743 + mm->mmap_base += mm->delta_mmap;
8744 +#endif
8745 +
8746 mm->get_unmapped_area = arch_get_unmapped_area;
8747 } else {
8748 mm->mmap_base = mmap_base();
8749 +
8750 +#ifdef CONFIG_PAX_RANDMMAP
8751 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8752 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8753 +#endif
8754 +
8755 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8756 }
8757 }
8758 @@ -173,9 +185,21 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8759 */
8760 if (mmap_is_legacy()) {
8761 mm->mmap_base = TASK_UNMAPPED_BASE;
8762 +
8763 +#ifdef CONFIG_PAX_RANDMMAP
8764 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8765 + mm->mmap_base += mm->delta_mmap;
8766 +#endif
8767 +
8768 mm->get_unmapped_area = s390_get_unmapped_area;
8769 } else {
8770 mm->mmap_base = mmap_base();
8771 +
8772 +#ifdef CONFIG_PAX_RANDMMAP
8773 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8774 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8775 +#endif
8776 +
8777 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
8778 }
8779 }
8780 diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
8781 index ae3d59f..f65f075 100644
8782 --- a/arch/score/include/asm/cache.h
8783 +++ b/arch/score/include/asm/cache.h
8784 @@ -1,7 +1,9 @@
8785 #ifndef _ASM_SCORE_CACHE_H
8786 #define _ASM_SCORE_CACHE_H
8787
8788 +#include <linux/const.h>
8789 +
8790 #define L1_CACHE_SHIFT 4
8791 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8792 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8793
8794 #endif /* _ASM_SCORE_CACHE_H */
8795 diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
8796 index f9f3cd5..58ff438 100644
8797 --- a/arch/score/include/asm/exec.h
8798 +++ b/arch/score/include/asm/exec.h
8799 @@ -1,6 +1,6 @@
8800 #ifndef _ASM_SCORE_EXEC_H
8801 #define _ASM_SCORE_EXEC_H
8802
8803 -extern unsigned long arch_align_stack(unsigned long sp);
8804 +#define arch_align_stack(x) (x)
8805
8806 #endif /* _ASM_SCORE_EXEC_H */
8807 diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
8808 index a1519ad3..e8ac1ff 100644
8809 --- a/arch/score/kernel/process.c
8810 +++ b/arch/score/kernel/process.c
8811 @@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
8812
8813 return task_pt_regs(task)->cp0_epc;
8814 }
8815 -
8816 -unsigned long arch_align_stack(unsigned long sp)
8817 -{
8818 - return sp;
8819 -}
8820 diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
8821 index ef9e555..331bd29 100644
8822 --- a/arch/sh/include/asm/cache.h
8823 +++ b/arch/sh/include/asm/cache.h
8824 @@ -9,10 +9,11 @@
8825 #define __ASM_SH_CACHE_H
8826 #ifdef __KERNEL__
8827
8828 +#include <linux/const.h>
8829 #include <linux/init.h>
8830 #include <cpu/cache.h>
8831
8832 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8833 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8834
8835 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
8836
8837 diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
8838 index 6777177..cb5e44f 100644
8839 --- a/arch/sh/mm/mmap.c
8840 +++ b/arch/sh/mm/mmap.c
8841 @@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8842 struct mm_struct *mm = current->mm;
8843 struct vm_area_struct *vma;
8844 int do_colour_align;
8845 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8846 struct vm_unmapped_area_info info;
8847
8848 if (flags & MAP_FIXED) {
8849 @@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8850 if (filp || (flags & MAP_SHARED))
8851 do_colour_align = 1;
8852
8853 +#ifdef CONFIG_PAX_RANDMMAP
8854 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8855 +#endif
8856 +
8857 if (addr) {
8858 if (do_colour_align)
8859 addr = COLOUR_ALIGN(addr, pgoff);
8860 @@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
8861 addr = PAGE_ALIGN(addr);
8862
8863 vma = find_vma(mm, addr);
8864 - if (TASK_SIZE - len >= addr &&
8865 - (!vma || addr + len <= vma->vm_start))
8866 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8867 return addr;
8868 }
8869
8870 info.flags = 0;
8871 info.length = len;
8872 - info.low_limit = TASK_UNMAPPED_BASE;
8873 + info.low_limit = mm->mmap_base;
8874 info.high_limit = TASK_SIZE;
8875 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
8876 info.align_offset = pgoff << PAGE_SHIFT;
8877 @@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8878 struct mm_struct *mm = current->mm;
8879 unsigned long addr = addr0;
8880 int do_colour_align;
8881 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8882 struct vm_unmapped_area_info info;
8883
8884 if (flags & MAP_FIXED) {
8885 @@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8886 if (filp || (flags & MAP_SHARED))
8887 do_colour_align = 1;
8888
8889 +#ifdef CONFIG_PAX_RANDMMAP
8890 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8891 +#endif
8892 +
8893 /* requesting a specific address */
8894 if (addr) {
8895 if (do_colour_align)
8896 @@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8897 addr = PAGE_ALIGN(addr);
8898
8899 vma = find_vma(mm, addr);
8900 - if (TASK_SIZE - len >= addr &&
8901 - (!vma || addr + len <= vma->vm_start))
8902 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8903 return addr;
8904 }
8905
8906 @@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8907 VM_BUG_ON(addr != -ENOMEM);
8908 info.flags = 0;
8909 info.low_limit = TASK_UNMAPPED_BASE;
8910 +
8911 +#ifdef CONFIG_PAX_RANDMMAP
8912 + if (mm->pax_flags & MF_PAX_RANDMMAP)
8913 + info.low_limit += mm->delta_mmap;
8914 +#endif
8915 +
8916 info.high_limit = TASK_SIZE;
8917 addr = vm_unmapped_area(&info);
8918 }
8919 diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
8920 index be56a24..443328f 100644
8921 --- a/arch/sparc/include/asm/atomic_64.h
8922 +++ b/arch/sparc/include/asm/atomic_64.h
8923 @@ -14,18 +14,40 @@
8924 #define ATOMIC64_INIT(i) { (i) }
8925
8926 #define atomic_read(v) (*(volatile int *)&(v)->counter)
8927 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8928 +{
8929 + return v->counter;
8930 +}
8931 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
8932 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
8933 +{
8934 + return v->counter;
8935 +}
8936
8937 #define atomic_set(v, i) (((v)->counter) = i)
8938 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8939 +{
8940 + v->counter = i;
8941 +}
8942 #define atomic64_set(v, i) (((v)->counter) = i)
8943 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
8944 +{
8945 + v->counter = i;
8946 +}
8947
8948 extern void atomic_add(int, atomic_t *);
8949 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
8950 extern void atomic64_add(long, atomic64_t *);
8951 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
8952 extern void atomic_sub(int, atomic_t *);
8953 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
8954 extern void atomic64_sub(long, atomic64_t *);
8955 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
8956
8957 extern int atomic_add_ret(int, atomic_t *);
8958 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
8959 extern long atomic64_add_ret(long, atomic64_t *);
8960 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
8961 extern int atomic_sub_ret(int, atomic_t *);
8962 extern long atomic64_sub_ret(long, atomic64_t *);
8963
8964 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8965 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
8966
8967 #define atomic_inc_return(v) atomic_add_ret(1, v)
8968 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8969 +{
8970 + return atomic_add_ret_unchecked(1, v);
8971 +}
8972 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
8973 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
8974 +{
8975 + return atomic64_add_ret_unchecked(1, v);
8976 +}
8977
8978 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
8979 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
8980
8981 #define atomic_add_return(i, v) atomic_add_ret(i, v)
8982 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8983 +{
8984 + return atomic_add_ret_unchecked(i, v);
8985 +}
8986 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
8987 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
8988 +{
8989 + return atomic64_add_ret_unchecked(i, v);
8990 +}
8991
8992 /*
8993 * atomic_inc_and_test - increment and test
8994 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
8995 * other cases.
8996 */
8997 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
8998 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8999 +{
9000 + return atomic_inc_return_unchecked(v) == 0;
9001 +}
9002 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
9003
9004 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
9005 @@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
9006 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
9007
9008 #define atomic_inc(v) atomic_add(1, v)
9009 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9010 +{
9011 + atomic_add_unchecked(1, v);
9012 +}
9013 #define atomic64_inc(v) atomic64_add(1, v)
9014 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9015 +{
9016 + atomic64_add_unchecked(1, v);
9017 +}
9018
9019 #define atomic_dec(v) atomic_sub(1, v)
9020 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9021 +{
9022 + atomic_sub_unchecked(1, v);
9023 +}
9024 #define atomic64_dec(v) atomic64_sub(1, v)
9025 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9026 +{
9027 + atomic64_sub_unchecked(1, v);
9028 +}
9029
9030 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
9031 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
9032
9033 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
9034 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9035 +{
9036 + return cmpxchg(&v->counter, old, new);
9037 +}
9038 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
9039 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9040 +{
9041 + return xchg(&v->counter, new);
9042 +}
9043
9044 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9045 {
9046 - int c, old;
9047 + int c, old, new;
9048 c = atomic_read(v);
9049 for (;;) {
9050 - if (unlikely(c == (u)))
9051 + if (unlikely(c == u))
9052 break;
9053 - old = atomic_cmpxchg((v), c, c + (a));
9054 +
9055 + asm volatile("addcc %2, %0, %0\n"
9056 +
9057 +#ifdef CONFIG_PAX_REFCOUNT
9058 + "tvs %%icc, 6\n"
9059 +#endif
9060 +
9061 + : "=r" (new)
9062 + : "0" (c), "ir" (a)
9063 + : "cc");
9064 +
9065 + old = atomic_cmpxchg(v, c, new);
9066 if (likely(old == c))
9067 break;
9068 c = old;
9069 @@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
9070 #define atomic64_cmpxchg(v, o, n) \
9071 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
9072 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
9073 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9074 +{
9075 + return xchg(&v->counter, new);
9076 +}
9077
9078 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
9079 {
9080 - long c, old;
9081 + long c, old, new;
9082 c = atomic64_read(v);
9083 for (;;) {
9084 - if (unlikely(c == (u)))
9085 + if (unlikely(c == u))
9086 break;
9087 - old = atomic64_cmpxchg((v), c, c + (a));
9088 +
9089 + asm volatile("addcc %2, %0, %0\n"
9090 +
9091 +#ifdef CONFIG_PAX_REFCOUNT
9092 + "tvs %%xcc, 6\n"
9093 +#endif
9094 +
9095 + : "=r" (new)
9096 + : "0" (c), "ir" (a)
9097 + : "cc");
9098 +
9099 + old = atomic64_cmpxchg(v, c, new);
9100 if (likely(old == c))
9101 break;
9102 c = old;
9103 }
9104 - return c != (u);
9105 + return c != u;
9106 }
9107
9108 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9109 diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
9110 index 5bb6991..5c2132e 100644
9111 --- a/arch/sparc/include/asm/cache.h
9112 +++ b/arch/sparc/include/asm/cache.h
9113 @@ -7,10 +7,12 @@
9114 #ifndef _SPARC_CACHE_H
9115 #define _SPARC_CACHE_H
9116
9117 +#include <linux/const.h>
9118 +
9119 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
9120
9121 #define L1_CACHE_SHIFT 5
9122 -#define L1_CACHE_BYTES 32
9123 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9124
9125 #ifdef CONFIG_SPARC32
9126 #define SMP_CACHE_BYTES_SHIFT 5
9127 diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
9128 index a24e41f..47677ff 100644
9129 --- a/arch/sparc/include/asm/elf_32.h
9130 +++ b/arch/sparc/include/asm/elf_32.h
9131 @@ -114,6 +114,13 @@ typedef struct {
9132
9133 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
9134
9135 +#ifdef CONFIG_PAX_ASLR
9136 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
9137 +
9138 +#define PAX_DELTA_MMAP_LEN 16
9139 +#define PAX_DELTA_STACK_LEN 16
9140 +#endif
9141 +
9142 /* This yields a mask that user programs can use to figure out what
9143 instruction set this cpu supports. This can NOT be done in userspace
9144 on Sparc. */
9145 diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
9146 index 370ca1e..d4f4a98 100644
9147 --- a/arch/sparc/include/asm/elf_64.h
9148 +++ b/arch/sparc/include/asm/elf_64.h
9149 @@ -189,6 +189,13 @@ typedef struct {
9150 #define ELF_ET_DYN_BASE 0x0000010000000000UL
9151 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
9152
9153 +#ifdef CONFIG_PAX_ASLR
9154 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
9155 +
9156 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
9157 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
9158 +#endif
9159 +
9160 extern unsigned long sparc64_elf_hwcap;
9161 #define ELF_HWCAP sparc64_elf_hwcap
9162
9163 diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
9164 index 9b1c36d..209298b 100644
9165 --- a/arch/sparc/include/asm/pgalloc_32.h
9166 +++ b/arch/sparc/include/asm/pgalloc_32.h
9167 @@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
9168 }
9169
9170 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
9171 +#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
9172
9173 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
9174 unsigned long address)
9175 diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
9176 index bcfe063..b333142 100644
9177 --- a/arch/sparc/include/asm/pgalloc_64.h
9178 +++ b/arch/sparc/include/asm/pgalloc_64.h
9179 @@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
9180 }
9181
9182 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
9183 +#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
9184
9185 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
9186 {
9187 diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
9188 index 502f632..da1917f 100644
9189 --- a/arch/sparc/include/asm/pgtable_32.h
9190 +++ b/arch/sparc/include/asm/pgtable_32.h
9191 @@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
9192 #define PAGE_SHARED SRMMU_PAGE_SHARED
9193 #define PAGE_COPY SRMMU_PAGE_COPY
9194 #define PAGE_READONLY SRMMU_PAGE_RDONLY
9195 +#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
9196 +#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
9197 +#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
9198 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
9199
9200 /* Top-level page directory - dummy used by init-mm.
9201 @@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
9202
9203 /* xwr */
9204 #define __P000 PAGE_NONE
9205 -#define __P001 PAGE_READONLY
9206 -#define __P010 PAGE_COPY
9207 -#define __P011 PAGE_COPY
9208 +#define __P001 PAGE_READONLY_NOEXEC
9209 +#define __P010 PAGE_COPY_NOEXEC
9210 +#define __P011 PAGE_COPY_NOEXEC
9211 #define __P100 PAGE_READONLY
9212 #define __P101 PAGE_READONLY
9213 #define __P110 PAGE_COPY
9214 #define __P111 PAGE_COPY
9215
9216 #define __S000 PAGE_NONE
9217 -#define __S001 PAGE_READONLY
9218 -#define __S010 PAGE_SHARED
9219 -#define __S011 PAGE_SHARED
9220 +#define __S001 PAGE_READONLY_NOEXEC
9221 +#define __S010 PAGE_SHARED_NOEXEC
9222 +#define __S011 PAGE_SHARED_NOEXEC
9223 #define __S100 PAGE_READONLY
9224 #define __S101 PAGE_READONLY
9225 #define __S110 PAGE_SHARED
9226 diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
9227 index 79da178..c2eede8 100644
9228 --- a/arch/sparc/include/asm/pgtsrmmu.h
9229 +++ b/arch/sparc/include/asm/pgtsrmmu.h
9230 @@ -115,6 +115,11 @@
9231 SRMMU_EXEC | SRMMU_REF)
9232 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
9233 SRMMU_EXEC | SRMMU_REF)
9234 +
9235 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
9236 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9237 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
9238 +
9239 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
9240 SRMMU_DIRTY | SRMMU_REF)
9241
9242 diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
9243 index 9689176..63c18ea 100644
9244 --- a/arch/sparc/include/asm/spinlock_64.h
9245 +++ b/arch/sparc/include/asm/spinlock_64.h
9246 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
9247
9248 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
9249
9250 -static void inline arch_read_lock(arch_rwlock_t *lock)
9251 +static inline void arch_read_lock(arch_rwlock_t *lock)
9252 {
9253 unsigned long tmp1, tmp2;
9254
9255 __asm__ __volatile__ (
9256 "1: ldsw [%2], %0\n"
9257 " brlz,pn %0, 2f\n"
9258 -"4: add %0, 1, %1\n"
9259 +"4: addcc %0, 1, %1\n"
9260 +
9261 +#ifdef CONFIG_PAX_REFCOUNT
9262 +" tvs %%icc, 6\n"
9263 +#endif
9264 +
9265 " cas [%2], %0, %1\n"
9266 " cmp %0, %1\n"
9267 " bne,pn %%icc, 1b\n"
9268 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
9269 " .previous"
9270 : "=&r" (tmp1), "=&r" (tmp2)
9271 : "r" (lock)
9272 - : "memory");
9273 + : "memory", "cc");
9274 }
9275
9276 -static int inline arch_read_trylock(arch_rwlock_t *lock)
9277 +static inline int arch_read_trylock(arch_rwlock_t *lock)
9278 {
9279 int tmp1, tmp2;
9280
9281 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9282 "1: ldsw [%2], %0\n"
9283 " brlz,a,pn %0, 2f\n"
9284 " mov 0, %0\n"
9285 -" add %0, 1, %1\n"
9286 +" addcc %0, 1, %1\n"
9287 +
9288 +#ifdef CONFIG_PAX_REFCOUNT
9289 +" tvs %%icc, 6\n"
9290 +#endif
9291 +
9292 " cas [%2], %0, %1\n"
9293 " cmp %0, %1\n"
9294 " bne,pn %%icc, 1b\n"
9295 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
9296 return tmp1;
9297 }
9298
9299 -static void inline arch_read_unlock(arch_rwlock_t *lock)
9300 +static inline void arch_read_unlock(arch_rwlock_t *lock)
9301 {
9302 unsigned long tmp1, tmp2;
9303
9304 __asm__ __volatile__(
9305 "1: lduw [%2], %0\n"
9306 -" sub %0, 1, %1\n"
9307 +" subcc %0, 1, %1\n"
9308 +
9309 +#ifdef CONFIG_PAX_REFCOUNT
9310 +" tvs %%icc, 6\n"
9311 +#endif
9312 +
9313 " cas [%2], %0, %1\n"
9314 " cmp %0, %1\n"
9315 " bne,pn %%xcc, 1b\n"
9316 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
9317 : "memory");
9318 }
9319
9320 -static void inline arch_write_lock(arch_rwlock_t *lock)
9321 +static inline void arch_write_lock(arch_rwlock_t *lock)
9322 {
9323 unsigned long mask, tmp1, tmp2;
9324
9325 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
9326 : "memory");
9327 }
9328
9329 -static void inline arch_write_unlock(arch_rwlock_t *lock)
9330 +static inline void arch_write_unlock(arch_rwlock_t *lock)
9331 {
9332 __asm__ __volatile__(
9333 " stw %%g0, [%0]"
9334 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
9335 : "memory");
9336 }
9337
9338 -static int inline arch_write_trylock(arch_rwlock_t *lock)
9339 +static inline int arch_write_trylock(arch_rwlock_t *lock)
9340 {
9341 unsigned long mask, tmp1, tmp2, result;
9342
9343 diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
9344 index dd38075..e7cac83 100644
9345 --- a/arch/sparc/include/asm/thread_info_32.h
9346 +++ b/arch/sparc/include/asm/thread_info_32.h
9347 @@ -49,6 +49,8 @@ struct thread_info {
9348 unsigned long w_saved;
9349
9350 struct restart_block restart_block;
9351 +
9352 + unsigned long lowest_stack;
9353 };
9354
9355 /*
9356 diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
9357 index d5e5042..9bfee76 100644
9358 --- a/arch/sparc/include/asm/thread_info_64.h
9359 +++ b/arch/sparc/include/asm/thread_info_64.h
9360 @@ -63,6 +63,8 @@ struct thread_info {
9361 struct pt_regs *kern_una_regs;
9362 unsigned int kern_una_insn;
9363
9364 + unsigned long lowest_stack;
9365 +
9366 unsigned long fpregs[0] __attribute__ ((aligned(64)));
9367 };
9368
9369 @@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
9370 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
9371 /* flag bit 6 is available */
9372 #define TIF_32BIT 7 /* 32-bit binary */
9373 -/* flag bit 8 is available */
9374 +#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
9375 #define TIF_SECCOMP 9 /* secure computing */
9376 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
9377 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
9378 +
9379 /* NOTE: Thread flags >= 12 should be ones we have no interest
9380 * in using in assembly, else we can't use the mask as
9381 * an immediate value in instructions such as andcc.
9382 @@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
9383 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
9384 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
9385 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
9386 +#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
9387
9388 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
9389 _TIF_DO_NOTIFY_RESUME_MASK | \
9390 _TIF_NEED_RESCHED)
9391 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
9392
9393 +#define _TIF_WORK_SYSCALL \
9394 + (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
9395 + _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
9396 +
9397 +
9398 /*
9399 * Thread-synchronous status.
9400 *
9401 diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
9402 index 0167d26..767bb0c 100644
9403 --- a/arch/sparc/include/asm/uaccess.h
9404 +++ b/arch/sparc/include/asm/uaccess.h
9405 @@ -1,5 +1,6 @@
9406 #ifndef ___ASM_SPARC_UACCESS_H
9407 #define ___ASM_SPARC_UACCESS_H
9408 +
9409 #if defined(__sparc__) && defined(__arch64__)
9410 #include <asm/uaccess_64.h>
9411 #else
9412 diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
9413 index 53a28dd..50c38c3 100644
9414 --- a/arch/sparc/include/asm/uaccess_32.h
9415 +++ b/arch/sparc/include/asm/uaccess_32.h
9416 @@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
9417
9418 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
9419 {
9420 - if (n && __access_ok((unsigned long) to, n))
9421 + if ((long)n < 0)
9422 + return n;
9423 +
9424 + if (n && __access_ok((unsigned long) to, n)) {
9425 + if (!__builtin_constant_p(n))
9426 + check_object_size(from, n, true);
9427 return __copy_user(to, (__force void __user *) from, n);
9428 - else
9429 + } else
9430 return n;
9431 }
9432
9433 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
9434 {
9435 + if ((long)n < 0)
9436 + return n;
9437 +
9438 + if (!__builtin_constant_p(n))
9439 + check_object_size(from, n, true);
9440 +
9441 return __copy_user(to, (__force void __user *) from, n);
9442 }
9443
9444 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
9445 {
9446 - if (n && __access_ok((unsigned long) from, n))
9447 + if ((long)n < 0)
9448 + return n;
9449 +
9450 + if (n && __access_ok((unsigned long) from, n)) {
9451 + if (!__builtin_constant_p(n))
9452 + check_object_size(to, n, false);
9453 return __copy_user((__force void __user *) to, from, n);
9454 - else
9455 + } else
9456 return n;
9457 }
9458
9459 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
9460 {
9461 + if ((long)n < 0)
9462 + return n;
9463 +
9464 return __copy_user((__force void __user *) to, from, n);
9465 }
9466
9467 diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
9468 index e562d3c..191f176 100644
9469 --- a/arch/sparc/include/asm/uaccess_64.h
9470 +++ b/arch/sparc/include/asm/uaccess_64.h
9471 @@ -10,6 +10,7 @@
9472 #include <linux/compiler.h>
9473 #include <linux/string.h>
9474 #include <linux/thread_info.h>
9475 +#include <linux/kernel.h>
9476 #include <asm/asi.h>
9477 #include <asm/spitfire.h>
9478 #include <asm-generic/uaccess-unaligned.h>
9479 @@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
9480 static inline unsigned long __must_check
9481 copy_from_user(void *to, const void __user *from, unsigned long size)
9482 {
9483 - unsigned long ret = ___copy_from_user(to, from, size);
9484 + unsigned long ret;
9485
9486 + if ((long)size < 0 || size > INT_MAX)
9487 + return size;
9488 +
9489 + if (!__builtin_constant_p(size))
9490 + check_object_size(to, size, false);
9491 +
9492 + ret = ___copy_from_user(to, from, size);
9493 if (unlikely(ret))
9494 ret = copy_from_user_fixup(to, from, size);
9495
9496 @@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
9497 static inline unsigned long __must_check
9498 copy_to_user(void __user *to, const void *from, unsigned long size)
9499 {
9500 - unsigned long ret = ___copy_to_user(to, from, size);
9501 + unsigned long ret;
9502
9503 + if ((long)size < 0 || size > INT_MAX)
9504 + return size;
9505 +
9506 + if (!__builtin_constant_p(size))
9507 + check_object_size(from, size, true);
9508 +
9509 + ret = ___copy_to_user(to, from, size);
9510 if (unlikely(ret))
9511 ret = copy_to_user_fixup(to, from, size);
9512 return ret;
9513 diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
9514 index d15cc17..d0ae796 100644
9515 --- a/arch/sparc/kernel/Makefile
9516 +++ b/arch/sparc/kernel/Makefile
9517 @@ -4,7 +4,7 @@
9518 #
9519
9520 asflags-y := -ansi
9521 -ccflags-y := -Werror
9522 +#ccflags-y := -Werror
9523
9524 extra-y := head_$(BITS).o
9525
9526 diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
9527 index fdd819d..5af08c8 100644
9528 --- a/arch/sparc/kernel/process_32.c
9529 +++ b/arch/sparc/kernel/process_32.c
9530 @@ -116,14 +116,14 @@ void show_regs(struct pt_regs *r)
9531
9532 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
9533 r->psr, r->pc, r->npc, r->y, print_tainted());
9534 - printk("PC: <%pS>\n", (void *) r->pc);
9535 + printk("PC: <%pA>\n", (void *) r->pc);
9536 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9537 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
9538 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
9539 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9540 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
9541 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
9542 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
9543 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
9544
9545 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
9546 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
9547 @@ -160,7 +160,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
9548 rw = (struct reg_window32 *) fp;
9549 pc = rw->ins[7];
9550 printk("[%08lx : ", pc);
9551 - printk("%pS ] ", (void *) pc);
9552 + printk("%pA ] ", (void *) pc);
9553 fp = rw->ins[6];
9554 } while (++count < 16);
9555 printk("\n");
9556 diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
9557 index baebab2..9cd13b1 100644
9558 --- a/arch/sparc/kernel/process_64.c
9559 +++ b/arch/sparc/kernel/process_64.c
9560 @@ -158,7 +158,7 @@ static void show_regwindow(struct pt_regs *regs)
9561 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
9562 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
9563 if (regs->tstate & TSTATE_PRIV)
9564 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
9565 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
9566 }
9567
9568 void show_regs(struct pt_regs *regs)
9569 @@ -167,7 +167,7 @@ void show_regs(struct pt_regs *regs)
9570
9571 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
9572 regs->tpc, regs->tnpc, regs->y, print_tainted());
9573 - printk("TPC: <%pS>\n", (void *) regs->tpc);
9574 + printk("TPC: <%pA>\n", (void *) regs->tpc);
9575 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
9576 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
9577 regs->u_regs[3]);
9578 @@ -180,7 +180,7 @@ void show_regs(struct pt_regs *regs)
9579 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
9580 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
9581 regs->u_regs[15]);
9582 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
9583 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
9584 show_regwindow(regs);
9585 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
9586 }
9587 @@ -269,7 +269,7 @@ void arch_trigger_all_cpu_backtrace(void)
9588 ((tp && tp->task) ? tp->task->pid : -1));
9589
9590 if (gp->tstate & TSTATE_PRIV) {
9591 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
9592 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
9593 (void *) gp->tpc,
9594 (void *) gp->o7,
9595 (void *) gp->i7,
9596 diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
9597 index 79cc0d1..ec62734 100644
9598 --- a/arch/sparc/kernel/prom_common.c
9599 +++ b/arch/sparc/kernel/prom_common.c
9600 @@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
9601
9602 unsigned int prom_early_allocated __initdata;
9603
9604 -static struct of_pdt_ops prom_sparc_ops __initdata = {
9605 +static struct of_pdt_ops prom_sparc_ops __initconst = {
9606 .nextprop = prom_common_nextprop,
9607 .getproplen = prom_getproplen,
9608 .getproperty = prom_getproperty,
9609 diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
9610 index 773c1f2..a8bdd87 100644
9611 --- a/arch/sparc/kernel/ptrace_64.c
9612 +++ b/arch/sparc/kernel/ptrace_64.c
9613 @@ -1059,6 +1059,10 @@ long arch_ptrace(struct task_struct *child, long request,
9614 return ret;
9615 }
9616
9617 +#ifdef CONFIG_GRKERNSEC_SETXID
9618 +extern void gr_delayed_cred_worker(void);
9619 +#endif
9620 +
9621 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9622 {
9623 int ret = 0;
9624 @@ -1066,6 +1070,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9625 /* do the secure computing check first */
9626 secure_computing_strict(regs->u_regs[UREG_G1]);
9627
9628 +#ifdef CONFIG_GRKERNSEC_SETXID
9629 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9630 + gr_delayed_cred_worker();
9631 +#endif
9632 +
9633 if (test_thread_flag(TIF_SYSCALL_TRACE))
9634 ret = tracehook_report_syscall_entry(regs);
9635
9636 @@ -1086,6 +1095,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
9637
9638 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
9639 {
9640 +#ifdef CONFIG_GRKERNSEC_SETXID
9641 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
9642 + gr_delayed_cred_worker();
9643 +#endif
9644 +
9645 audit_syscall_exit(regs);
9646
9647 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
9648 diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
9649 index e142545..fd29654 100644
9650 --- a/arch/sparc/kernel/smp_64.c
9651 +++ b/arch/sparc/kernel/smp_64.c
9652 @@ -869,8 +869,8 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
9653 extern unsigned long xcall_flush_dcache_page_spitfire;
9654
9655 #ifdef CONFIG_DEBUG_DCFLUSH
9656 -extern atomic_t dcpage_flushes;
9657 -extern atomic_t dcpage_flushes_xcall;
9658 +extern atomic_unchecked_t dcpage_flushes;
9659 +extern atomic_unchecked_t dcpage_flushes_xcall;
9660 #endif
9661
9662 static inline void __local_flush_dcache_page(struct page *page)
9663 @@ -894,7 +894,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
9664 return;
9665
9666 #ifdef CONFIG_DEBUG_DCFLUSH
9667 - atomic_inc(&dcpage_flushes);
9668 + atomic_inc_unchecked(&dcpage_flushes);
9669 #endif
9670
9671 this_cpu = get_cpu();
9672 @@ -918,7 +918,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
9673 xcall_deliver(data0, __pa(pg_addr),
9674 (u64) pg_addr, cpumask_of(cpu));
9675 #ifdef CONFIG_DEBUG_DCFLUSH
9676 - atomic_inc(&dcpage_flushes_xcall);
9677 + atomic_inc_unchecked(&dcpage_flushes_xcall);
9678 #endif
9679 }
9680 }
9681 @@ -937,7 +937,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
9682 preempt_disable();
9683
9684 #ifdef CONFIG_DEBUG_DCFLUSH
9685 - atomic_inc(&dcpage_flushes);
9686 + atomic_inc_unchecked(&dcpage_flushes);
9687 #endif
9688 data0 = 0;
9689 pg_addr = page_address(page);
9690 @@ -954,7 +954,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
9691 xcall_deliver(data0, __pa(pg_addr),
9692 (u64) pg_addr, cpu_online_mask);
9693 #ifdef CONFIG_DEBUG_DCFLUSH
9694 - atomic_inc(&dcpage_flushes_xcall);
9695 + atomic_inc_unchecked(&dcpage_flushes_xcall);
9696 #endif
9697 }
9698 __local_flush_dcache_page(page);
9699 diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
9700 index 3a8d184..49498a8 100644
9701 --- a/arch/sparc/kernel/sys_sparc_32.c
9702 +++ b/arch/sparc/kernel/sys_sparc_32.c
9703 @@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9704 if (len > TASK_SIZE - PAGE_SIZE)
9705 return -ENOMEM;
9706 if (!addr)
9707 - addr = TASK_UNMAPPED_BASE;
9708 + addr = current->mm->mmap_base;
9709
9710 info.flags = 0;
9711 info.length = len;
9712 diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
9713 index 51561b8..8256764 100644
9714 --- a/arch/sparc/kernel/sys_sparc_64.c
9715 +++ b/arch/sparc/kernel/sys_sparc_64.c
9716 @@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9717 struct vm_area_struct * vma;
9718 unsigned long task_size = TASK_SIZE;
9719 int do_color_align;
9720 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9721 struct vm_unmapped_area_info info;
9722
9723 if (flags & MAP_FIXED) {
9724 /* We do not accept a shared mapping if it would violate
9725 * cache aliasing constraints.
9726 */
9727 - if ((flags & MAP_SHARED) &&
9728 + if ((filp || (flags & MAP_SHARED)) &&
9729 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
9730 return -EINVAL;
9731 return addr;
9732 @@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9733 if (filp || (flags & MAP_SHARED))
9734 do_color_align = 1;
9735
9736 +#ifdef CONFIG_PAX_RANDMMAP
9737 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9738 +#endif
9739 +
9740 if (addr) {
9741 if (do_color_align)
9742 addr = COLOR_ALIGN(addr, pgoff);
9743 @@ -118,22 +123,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
9744 addr = PAGE_ALIGN(addr);
9745
9746 vma = find_vma(mm, addr);
9747 - if (task_size - len >= addr &&
9748 - (!vma || addr + len <= vma->vm_start))
9749 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9750 return addr;
9751 }
9752
9753 info.flags = 0;
9754 info.length = len;
9755 - info.low_limit = TASK_UNMAPPED_BASE;
9756 + info.low_limit = mm->mmap_base;
9757 info.high_limit = min(task_size, VA_EXCLUDE_START);
9758 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
9759 info.align_offset = pgoff << PAGE_SHIFT;
9760 + info.threadstack_offset = offset;
9761 addr = vm_unmapped_area(&info);
9762
9763 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
9764 VM_BUG_ON(addr != -ENOMEM);
9765 info.low_limit = VA_EXCLUDE_END;
9766 +
9767 +#ifdef CONFIG_PAX_RANDMMAP
9768 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9769 + info.low_limit += mm->delta_mmap;
9770 +#endif
9771 +
9772 info.high_limit = task_size;
9773 addr = vm_unmapped_area(&info);
9774 }
9775 @@ -151,6 +162,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9776 unsigned long task_size = STACK_TOP32;
9777 unsigned long addr = addr0;
9778 int do_color_align;
9779 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
9780 struct vm_unmapped_area_info info;
9781
9782 /* This should only ever run for 32-bit processes. */
9783 @@ -160,7 +172,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9784 /* We do not accept a shared mapping if it would violate
9785 * cache aliasing constraints.
9786 */
9787 - if ((flags & MAP_SHARED) &&
9788 + if ((filp || (flags & MAP_SHARED)) &&
9789 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
9790 return -EINVAL;
9791 return addr;
9792 @@ -173,6 +185,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9793 if (filp || (flags & MAP_SHARED))
9794 do_color_align = 1;
9795
9796 +#ifdef CONFIG_PAX_RANDMMAP
9797 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9798 +#endif
9799 +
9800 /* requesting a specific address */
9801 if (addr) {
9802 if (do_color_align)
9803 @@ -181,8 +197,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9804 addr = PAGE_ALIGN(addr);
9805
9806 vma = find_vma(mm, addr);
9807 - if (task_size - len >= addr &&
9808 - (!vma || addr + len <= vma->vm_start))
9809 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9810 return addr;
9811 }
9812
9813 @@ -192,6 +207,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9814 info.high_limit = mm->mmap_base;
9815 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
9816 info.align_offset = pgoff << PAGE_SHIFT;
9817 + info.threadstack_offset = offset;
9818 addr = vm_unmapped_area(&info);
9819
9820 /*
9821 @@ -204,6 +220,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9822 VM_BUG_ON(addr != -ENOMEM);
9823 info.flags = 0;
9824 info.low_limit = TASK_UNMAPPED_BASE;
9825 +
9826 +#ifdef CONFIG_PAX_RANDMMAP
9827 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9828 + info.low_limit += mm->delta_mmap;
9829 +#endif
9830 +
9831 info.high_limit = STACK_TOP32;
9832 addr = vm_unmapped_area(&info);
9833 }
9834 @@ -260,10 +282,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
9835 EXPORT_SYMBOL(get_fb_unmapped_area);
9836
9837 /* Essentially the same as PowerPC. */
9838 -static unsigned long mmap_rnd(void)
9839 +static unsigned long mmap_rnd(struct mm_struct *mm)
9840 {
9841 unsigned long rnd = 0UL;
9842
9843 +#ifdef CONFIG_PAX_RANDMMAP
9844 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9845 +#endif
9846 +
9847 if (current->flags & PF_RANDOMIZE) {
9848 unsigned long val = get_random_int();
9849 if (test_thread_flag(TIF_32BIT))
9850 @@ -276,7 +302,7 @@ static unsigned long mmap_rnd(void)
9851
9852 void arch_pick_mmap_layout(struct mm_struct *mm)
9853 {
9854 - unsigned long random_factor = mmap_rnd();
9855 + unsigned long random_factor = mmap_rnd(mm);
9856 unsigned long gap;
9857
9858 /*
9859 @@ -289,6 +315,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9860 gap == RLIM_INFINITY ||
9861 sysctl_legacy_va_layout) {
9862 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
9863 +
9864 +#ifdef CONFIG_PAX_RANDMMAP
9865 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9866 + mm->mmap_base += mm->delta_mmap;
9867 +#endif
9868 +
9869 mm->get_unmapped_area = arch_get_unmapped_area;
9870 } else {
9871 /* We know it's 32-bit */
9872 @@ -300,6 +332,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
9873 gap = (task_size / 6 * 5);
9874
9875 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
9876 +
9877 +#ifdef CONFIG_PAX_RANDMMAP
9878 + if (mm->pax_flags & MF_PAX_RANDMMAP)
9879 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
9880 +#endif
9881 +
9882 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
9883 }
9884 }
9885 diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
9886 index d950197..192f9d8 100644
9887 --- a/arch/sparc/kernel/syscalls.S
9888 +++ b/arch/sparc/kernel/syscalls.S
9889 @@ -52,7 +52,7 @@ sys32_rt_sigreturn:
9890 #endif
9891 .align 32
9892 1: ldx [%g6 + TI_FLAGS], %l5
9893 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9894 + andcc %l5, _TIF_WORK_SYSCALL, %g0
9895 be,pt %icc, rtrap
9896 nop
9897 call syscall_trace_leave
9898 @@ -184,7 +184,7 @@ linux_sparc_syscall32:
9899
9900 srl %i3, 0, %o3 ! IEU0
9901 srl %i2, 0, %o2 ! IEU0 Group
9902 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9903 + andcc %l0, _TIF_WORK_SYSCALL, %g0
9904 bne,pn %icc, linux_syscall_trace32 ! CTI
9905 mov %i0, %l5 ! IEU1
9906 5: call %l7 ! CTI Group brk forced
9907 @@ -207,7 +207,7 @@ linux_sparc_syscall:
9908
9909 mov %i3, %o3 ! IEU1
9910 mov %i4, %o4 ! IEU0 Group
9911 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9912 + andcc %l0, _TIF_WORK_SYSCALL, %g0
9913 bne,pn %icc, linux_syscall_trace ! CTI Group
9914 mov %i0, %l5 ! IEU0
9915 2: call %l7 ! CTI Group brk forced
9916 @@ -223,7 +223,7 @@ ret_sys_call:
9917
9918 cmp %o0, -ERESTART_RESTARTBLOCK
9919 bgeu,pn %xcc, 1f
9920 - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
9921 + andcc %l0, _TIF_WORK_SYSCALL, %g0
9922 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
9923
9924 2:
9925 diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
9926 index 6629829..036032d 100644
9927 --- a/arch/sparc/kernel/traps_32.c
9928 +++ b/arch/sparc/kernel/traps_32.c
9929 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
9930 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
9931 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
9932
9933 +extern void gr_handle_kernel_exploit(void);
9934 +
9935 void die_if_kernel(char *str, struct pt_regs *regs)
9936 {
9937 static int die_counter;
9938 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
9939 count++ < 30 &&
9940 (((unsigned long) rw) >= PAGE_OFFSET) &&
9941 !(((unsigned long) rw) & 0x7)) {
9942 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
9943 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
9944 (void *) rw->ins[7]);
9945 rw = (struct reg_window32 *)rw->ins[6];
9946 }
9947 }
9948 printk("Instruction DUMP:");
9949 instruction_dump ((unsigned long *) regs->pc);
9950 - if(regs->psr & PSR_PS)
9951 + if(regs->psr & PSR_PS) {
9952 + gr_handle_kernel_exploit();
9953 do_exit(SIGKILL);
9954 + }
9955 do_exit(SIGSEGV);
9956 }
9957
9958 diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
9959 index b3f833a..f485f80 100644
9960 --- a/arch/sparc/kernel/traps_64.c
9961 +++ b/arch/sparc/kernel/traps_64.c
9962 @@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
9963 i + 1,
9964 p->trapstack[i].tstate, p->trapstack[i].tpc,
9965 p->trapstack[i].tnpc, p->trapstack[i].tt);
9966 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
9967 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
9968 }
9969 }
9970
9971 @@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
9972
9973 lvl -= 0x100;
9974 if (regs->tstate & TSTATE_PRIV) {
9975 +
9976 +#ifdef CONFIG_PAX_REFCOUNT
9977 + if (lvl == 6)
9978 + pax_report_refcount_overflow(regs);
9979 +#endif
9980 +
9981 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
9982 die_if_kernel(buffer, regs);
9983 }
9984 @@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
9985 void bad_trap_tl1(struct pt_regs *regs, long lvl)
9986 {
9987 char buffer[32];
9988 -
9989 +
9990 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
9991 0, lvl, SIGTRAP) == NOTIFY_STOP)
9992 return;
9993
9994 +#ifdef CONFIG_PAX_REFCOUNT
9995 + if (lvl == 6)
9996 + pax_report_refcount_overflow(regs);
9997 +#endif
9998 +
9999 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
10000
10001 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
10002 @@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
10003 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
10004 printk("%s" "ERROR(%d): ",
10005 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
10006 - printk("TPC<%pS>\n", (void *) regs->tpc);
10007 + printk("TPC<%pA>\n", (void *) regs->tpc);
10008 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
10009 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
10010 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
10011 @@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10012 smp_processor_id(),
10013 (type & 0x1) ? 'I' : 'D',
10014 regs->tpc);
10015 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
10016 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
10017 panic("Irrecoverable Cheetah+ parity error.");
10018 }
10019
10020 @@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
10021 smp_processor_id(),
10022 (type & 0x1) ? 'I' : 'D',
10023 regs->tpc);
10024 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
10025 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
10026 }
10027
10028 struct sun4v_error_entry {
10029 @@ -1830,8 +1841,8 @@ struct sun4v_error_entry {
10030 /*0x38*/u64 reserved_5;
10031 };
10032
10033 -static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
10034 -static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
10035 +static atomic_unchecked_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0);
10036 +static atomic_unchecked_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0);
10037
10038 static const char *sun4v_err_type_to_str(u8 type)
10039 {
10040 @@ -1923,7 +1934,7 @@ static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs)
10041 }
10042
10043 static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
10044 - int cpu, const char *pfx, atomic_t *ocnt)
10045 + int cpu, const char *pfx, atomic_unchecked_t *ocnt)
10046 {
10047 u64 *raw_ptr = (u64 *) ent;
10048 u32 attrs;
10049 @@ -1981,8 +1992,8 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent,
10050
10051 show_regs(regs);
10052
10053 - if ((cnt = atomic_read(ocnt)) != 0) {
10054 - atomic_set(ocnt, 0);
10055 + if ((cnt = atomic_read_unchecked(ocnt)) != 0) {
10056 + atomic_set_unchecked(ocnt, 0);
10057 wmb();
10058 printk("%s: Queue overflowed %d times.\n",
10059 pfx, cnt);
10060 @@ -2036,7 +2047,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
10061 */
10062 void sun4v_resum_overflow(struct pt_regs *regs)
10063 {
10064 - atomic_inc(&sun4v_resum_oflow_cnt);
10065 + atomic_inc_unchecked(&sun4v_resum_oflow_cnt);
10066 }
10067
10068 /* We run with %pil set to PIL_NORMAL_MAX and PSTATE_IE enabled in %pstate.
10069 @@ -2089,7 +2100,7 @@ void sun4v_nonresum_overflow(struct pt_regs *regs)
10070 /* XXX Actually even this can make not that much sense. Perhaps
10071 * XXX we should just pull the plug and panic directly from here?
10072 */
10073 - atomic_inc(&sun4v_nonresum_oflow_cnt);
10074 + atomic_inc_unchecked(&sun4v_nonresum_oflow_cnt);
10075 }
10076
10077 unsigned long sun4v_err_itlb_vaddr;
10078 @@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
10079
10080 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
10081 regs->tpc, tl);
10082 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
10083 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
10084 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
10085 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
10086 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
10087 (void *) regs->u_regs[UREG_I7]);
10088 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
10089 "pte[%lx] error[%lx]\n",
10090 @@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
10091
10092 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
10093 regs->tpc, tl);
10094 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
10095 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
10096 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
10097 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
10098 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
10099 (void *) regs->u_regs[UREG_I7]);
10100 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
10101 "pte[%lx] error[%lx]\n",
10102 @@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
10103 fp = (unsigned long)sf->fp + STACK_BIAS;
10104 }
10105
10106 - printk(" [%016lx] %pS\n", pc, (void *) pc);
10107 + printk(" [%016lx] %pA\n", pc, (void *) pc);
10108 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
10109 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
10110 int index = tsk->curr_ret_stack;
10111 if (tsk->ret_stack && index >= graph) {
10112 pc = tsk->ret_stack[index - graph].ret;
10113 - printk(" [%016lx] %pS\n", pc, (void *) pc);
10114 + printk(" [%016lx] %pA\n", pc, (void *) pc);
10115 graph++;
10116 }
10117 }
10118 @@ -2360,6 +2371,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
10119 return (struct reg_window *) (fp + STACK_BIAS);
10120 }
10121
10122 +extern void gr_handle_kernel_exploit(void);
10123 +
10124 void die_if_kernel(char *str, struct pt_regs *regs)
10125 {
10126 static int die_counter;
10127 @@ -2388,7 +2401,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
10128 while (rw &&
10129 count++ < 30 &&
10130 kstack_valid(tp, (unsigned long) rw)) {
10131 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
10132 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
10133 (void *) rw->ins[7]);
10134
10135 rw = kernel_stack_up(rw);
10136 @@ -2401,8 +2414,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
10137 }
10138 user_instruction_dump ((unsigned int __user *) regs->tpc);
10139 }
10140 - if (regs->tstate & TSTATE_PRIV)
10141 + if (regs->tstate & TSTATE_PRIV) {
10142 + gr_handle_kernel_exploit();
10143 do_exit(SIGKILL);
10144 + }
10145 do_exit(SIGSEGV);
10146 }
10147 EXPORT_SYMBOL(die_if_kernel);
10148 diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
10149 index 8201c25e..072a2a7 100644
10150 --- a/arch/sparc/kernel/unaligned_64.c
10151 +++ b/arch/sparc/kernel/unaligned_64.c
10152 @@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
10153 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
10154
10155 if (__ratelimit(&ratelimit)) {
10156 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
10157 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
10158 regs->tpc, (void *) regs->tpc);
10159 }
10160 }
10161 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
10162 index dbe119b..089c7c1 100644
10163 --- a/arch/sparc/lib/Makefile
10164 +++ b/arch/sparc/lib/Makefile
10165 @@ -2,7 +2,7 @@
10166 #
10167
10168 asflags-y := -ansi -DST_DIV0=0x02
10169 -ccflags-y := -Werror
10170 +#ccflags-y := -Werror
10171
10172 lib-$(CONFIG_SPARC32) += ashrdi3.o
10173 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
10174 diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
10175 index 85c233d..68500e0 100644
10176 --- a/arch/sparc/lib/atomic_64.S
10177 +++ b/arch/sparc/lib/atomic_64.S
10178 @@ -17,7 +17,12 @@
10179 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
10180 BACKOFF_SETUP(%o2)
10181 1: lduw [%o1], %g1
10182 - add %g1, %o0, %g7
10183 + addcc %g1, %o0, %g7
10184 +
10185 +#ifdef CONFIG_PAX_REFCOUNT
10186 + tvs %icc, 6
10187 +#endif
10188 +
10189 cas [%o1], %g1, %g7
10190 cmp %g1, %g7
10191 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10192 @@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
10193 2: BACKOFF_SPIN(%o2, %o3, 1b)
10194 ENDPROC(atomic_add)
10195
10196 +ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10197 + BACKOFF_SETUP(%o2)
10198 +1: lduw [%o1], %g1
10199 + add %g1, %o0, %g7
10200 + cas [%o1], %g1, %g7
10201 + cmp %g1, %g7
10202 + bne,pn %icc, 2f
10203 + nop
10204 + retl
10205 + nop
10206 +2: BACKOFF_SPIN(%o2, %o3, 1b)
10207 +ENDPROC(atomic_add_unchecked)
10208 +
10209 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10210 BACKOFF_SETUP(%o2)
10211 1: lduw [%o1], %g1
10212 - sub %g1, %o0, %g7
10213 + subcc %g1, %o0, %g7
10214 +
10215 +#ifdef CONFIG_PAX_REFCOUNT
10216 + tvs %icc, 6
10217 +#endif
10218 +
10219 cas [%o1], %g1, %g7
10220 cmp %g1, %g7
10221 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10222 @@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10223 2: BACKOFF_SPIN(%o2, %o3, 1b)
10224 ENDPROC(atomic_sub)
10225
10226 +ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10227 + BACKOFF_SETUP(%o2)
10228 +1: lduw [%o1], %g1
10229 + sub %g1, %o0, %g7
10230 + cas [%o1], %g1, %g7
10231 + cmp %g1, %g7
10232 + bne,pn %icc, 2f
10233 + nop
10234 + retl
10235 + nop
10236 +2: BACKOFF_SPIN(%o2, %o3, 1b)
10237 +ENDPROC(atomic_sub_unchecked)
10238 +
10239 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10240 BACKOFF_SETUP(%o2)
10241 1: lduw [%o1], %g1
10242 - add %g1, %o0, %g7
10243 + addcc %g1, %o0, %g7
10244 +
10245 +#ifdef CONFIG_PAX_REFCOUNT
10246 + tvs %icc, 6
10247 +#endif
10248 +
10249 cas [%o1], %g1, %g7
10250 cmp %g1, %g7
10251 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10252 @@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10253 2: BACKOFF_SPIN(%o2, %o3, 1b)
10254 ENDPROC(atomic_add_ret)
10255
10256 +ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10257 + BACKOFF_SETUP(%o2)
10258 +1: lduw [%o1], %g1
10259 + addcc %g1, %o0, %g7
10260 + cas [%o1], %g1, %g7
10261 + cmp %g1, %g7
10262 + bne,pn %icc, 2f
10263 + add %g7, %o0, %g7
10264 + sra %g7, 0, %o0
10265 + retl
10266 + nop
10267 +2: BACKOFF_SPIN(%o2, %o3, 1b)
10268 +ENDPROC(atomic_add_ret_unchecked)
10269 +
10270 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10271 BACKOFF_SETUP(%o2)
10272 1: lduw [%o1], %g1
10273 - sub %g1, %o0, %g7
10274 + subcc %g1, %o0, %g7
10275 +
10276 +#ifdef CONFIG_PAX_REFCOUNT
10277 + tvs %icc, 6
10278 +#endif
10279 +
10280 cas [%o1], %g1, %g7
10281 cmp %g1, %g7
10282 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
10283 @@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
10284 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10285 BACKOFF_SETUP(%o2)
10286 1: ldx [%o1], %g1
10287 - add %g1, %o0, %g7
10288 + addcc %g1, %o0, %g7
10289 +
10290 +#ifdef CONFIG_PAX_REFCOUNT
10291 + tvs %xcc, 6
10292 +#endif
10293 +
10294 casx [%o1], %g1, %g7
10295 cmp %g1, %g7
10296 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10297 @@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
10298 2: BACKOFF_SPIN(%o2, %o3, 1b)
10299 ENDPROC(atomic64_add)
10300
10301 +ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10302 + BACKOFF_SETUP(%o2)
10303 +1: ldx [%o1], %g1
10304 + addcc %g1, %o0, %g7
10305 + casx [%o1], %g1, %g7
10306 + cmp %g1, %g7
10307 + bne,pn %xcc, 2f
10308 + nop
10309 + retl
10310 + nop
10311 +2: BACKOFF_SPIN(%o2, %o3, 1b)
10312 +ENDPROC(atomic64_add_unchecked)
10313 +
10314 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10315 BACKOFF_SETUP(%o2)
10316 1: ldx [%o1], %g1
10317 - sub %g1, %o0, %g7
10318 + subcc %g1, %o0, %g7
10319 +
10320 +#ifdef CONFIG_PAX_REFCOUNT
10321 + tvs %xcc, 6
10322 +#endif
10323 +
10324 casx [%o1], %g1, %g7
10325 cmp %g1, %g7
10326 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10327 @@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
10328 2: BACKOFF_SPIN(%o2, %o3, 1b)
10329 ENDPROC(atomic64_sub)
10330
10331 +ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
10332 + BACKOFF_SETUP(%o2)
10333 +1: ldx [%o1], %g1
10334 + subcc %g1, %o0, %g7
10335 + casx [%o1], %g1, %g7
10336 + cmp %g1, %g7
10337 + bne,pn %xcc, 2f
10338 + nop
10339 + retl
10340 + nop
10341 +2: BACKOFF_SPIN(%o2, %o3, 1b)
10342 +ENDPROC(atomic64_sub_unchecked)
10343 +
10344 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10345 BACKOFF_SETUP(%o2)
10346 1: ldx [%o1], %g1
10347 - add %g1, %o0, %g7
10348 + addcc %g1, %o0, %g7
10349 +
10350 +#ifdef CONFIG_PAX_REFCOUNT
10351 + tvs %xcc, 6
10352 +#endif
10353 +
10354 casx [%o1], %g1, %g7
10355 cmp %g1, %g7
10356 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10357 @@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
10358 2: BACKOFF_SPIN(%o2, %o3, 1b)
10359 ENDPROC(atomic64_add_ret)
10360
10361 +ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
10362 + BACKOFF_SETUP(%o2)
10363 +1: ldx [%o1], %g1
10364 + addcc %g1, %o0, %g7
10365 + casx [%o1], %g1, %g7
10366 + cmp %g1, %g7
10367 + bne,pn %xcc, 2f
10368 + add %g7, %o0, %g7
10369 + mov %g7, %o0
10370 + retl
10371 + nop
10372 +2: BACKOFF_SPIN(%o2, %o3, 1b)
10373 +ENDPROC(atomic64_add_ret_unchecked)
10374 +
10375 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
10376 BACKOFF_SETUP(%o2)
10377 1: ldx [%o1], %g1
10378 - sub %g1, %o0, %g7
10379 + subcc %g1, %o0, %g7
10380 +
10381 +#ifdef CONFIG_PAX_REFCOUNT
10382 + tvs %xcc, 6
10383 +#endif
10384 +
10385 casx [%o1], %g1, %g7
10386 cmp %g1, %g7
10387 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
10388 diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
10389 index 323335b..ed85ea2 100644
10390 --- a/arch/sparc/lib/ksyms.c
10391 +++ b/arch/sparc/lib/ksyms.c
10392 @@ -100,12 +100,18 @@ EXPORT_SYMBOL(__clear_user);
10393
10394 /* Atomic counter implementation. */
10395 EXPORT_SYMBOL(atomic_add);
10396 +EXPORT_SYMBOL(atomic_add_unchecked);
10397 EXPORT_SYMBOL(atomic_add_ret);
10398 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
10399 EXPORT_SYMBOL(atomic_sub);
10400 +EXPORT_SYMBOL(atomic_sub_unchecked);
10401 EXPORT_SYMBOL(atomic_sub_ret);
10402 EXPORT_SYMBOL(atomic64_add);
10403 +EXPORT_SYMBOL(atomic64_add_unchecked);
10404 EXPORT_SYMBOL(atomic64_add_ret);
10405 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
10406 EXPORT_SYMBOL(atomic64_sub);
10407 +EXPORT_SYMBOL(atomic64_sub_unchecked);
10408 EXPORT_SYMBOL(atomic64_sub_ret);
10409 EXPORT_SYMBOL(atomic64_dec_if_positive);
10410
10411 diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
10412 index 30c3ecc..736f015 100644
10413 --- a/arch/sparc/mm/Makefile
10414 +++ b/arch/sparc/mm/Makefile
10415 @@ -2,7 +2,7 @@
10416 #
10417
10418 asflags-y := -ansi
10419 -ccflags-y := -Werror
10420 +#ccflags-y := -Werror
10421
10422 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
10423 obj-y += fault_$(BITS).o
10424 diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
10425 index 59dbd46..1dd7f5e 100644
10426 --- a/arch/sparc/mm/fault_32.c
10427 +++ b/arch/sparc/mm/fault_32.c
10428 @@ -21,6 +21,9 @@
10429 #include <linux/perf_event.h>
10430 #include <linux/interrupt.h>
10431 #include <linux/kdebug.h>
10432 +#include <linux/slab.h>
10433 +#include <linux/pagemap.h>
10434 +#include <linux/compiler.h>
10435
10436 #include <asm/page.h>
10437 #include <asm/pgtable.h>
10438 @@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
10439 return safe_compute_effective_address(regs, insn);
10440 }
10441
10442 +#ifdef CONFIG_PAX_PAGEEXEC
10443 +#ifdef CONFIG_PAX_DLRESOLVE
10444 +static void pax_emuplt_close(struct vm_area_struct *vma)
10445 +{
10446 + vma->vm_mm->call_dl_resolve = 0UL;
10447 +}
10448 +
10449 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10450 +{
10451 + unsigned int *kaddr;
10452 +
10453 + vmf->page = alloc_page(GFP_HIGHUSER);
10454 + if (!vmf->page)
10455 + return VM_FAULT_OOM;
10456 +
10457 + kaddr = kmap(vmf->page);
10458 + memset(kaddr, 0, PAGE_SIZE);
10459 + kaddr[0] = 0x9DE3BFA8U; /* save */
10460 + flush_dcache_page(vmf->page);
10461 + kunmap(vmf->page);
10462 + return VM_FAULT_MAJOR;
10463 +}
10464 +
10465 +static const struct vm_operations_struct pax_vm_ops = {
10466 + .close = pax_emuplt_close,
10467 + .fault = pax_emuplt_fault
10468 +};
10469 +
10470 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
10471 +{
10472 + int ret;
10473 +
10474 + INIT_LIST_HEAD(&vma->anon_vma_chain);
10475 + vma->vm_mm = current->mm;
10476 + vma->vm_start = addr;
10477 + vma->vm_end = addr + PAGE_SIZE;
10478 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
10479 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
10480 + vma->vm_ops = &pax_vm_ops;
10481 +
10482 + ret = insert_vm_struct(current->mm, vma);
10483 + if (ret)
10484 + return ret;
10485 +
10486 + ++current->mm->total_vm;
10487 + return 0;
10488 +}
10489 +#endif
10490 +
10491 +/*
10492 + * PaX: decide what to do with offenders (regs->pc = fault address)
10493 + *
10494 + * returns 1 when task should be killed
10495 + * 2 when patched PLT trampoline was detected
10496 + * 3 when unpatched PLT trampoline was detected
10497 + */
10498 +static int pax_handle_fetch_fault(struct pt_regs *regs)
10499 +{
10500 +
10501 +#ifdef CONFIG_PAX_EMUPLT
10502 + int err;
10503 +
10504 + do { /* PaX: patched PLT emulation #1 */
10505 + unsigned int sethi1, sethi2, jmpl;
10506 +
10507 + err = get_user(sethi1, (unsigned int *)regs->pc);
10508 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
10509 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
10510 +
10511 + if (err)
10512 + break;
10513 +
10514 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
10515 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
10516 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
10517 + {
10518 + unsigned int addr;
10519 +
10520 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
10521 + addr = regs->u_regs[UREG_G1];
10522 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10523 + regs->pc = addr;
10524 + regs->npc = addr+4;
10525 + return 2;
10526 + }
10527 + } while (0);
10528 +
10529 + do { /* PaX: patched PLT emulation #2 */
10530 + unsigned int ba;
10531 +
10532 + err = get_user(ba, (unsigned int *)regs->pc);
10533 +
10534 + if (err)
10535 + break;
10536 +
10537 + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
10538 + unsigned int addr;
10539 +
10540 + if ((ba & 0xFFC00000U) == 0x30800000U)
10541 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
10542 + else
10543 + addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10544 + regs->pc = addr;
10545 + regs->npc = addr+4;
10546 + return 2;
10547 + }
10548 + } while (0);
10549 +
10550 + do { /* PaX: patched PLT emulation #3 */
10551 + unsigned int sethi, bajmpl, nop;
10552 +
10553 + err = get_user(sethi, (unsigned int *)regs->pc);
10554 + err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
10555 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
10556 +
10557 + if (err)
10558 + break;
10559 +
10560 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
10561 + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
10562 + nop == 0x01000000U)
10563 + {
10564 + unsigned int addr;
10565 +
10566 + addr = (sethi & 0x003FFFFFU) << 10;
10567 + regs->u_regs[UREG_G1] = addr;
10568 + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
10569 + addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10570 + else
10571 + addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10572 + regs->pc = addr;
10573 + regs->npc = addr+4;
10574 + return 2;
10575 + }
10576 + } while (0);
10577 +
10578 + do { /* PaX: unpatched PLT emulation step 1 */
10579 + unsigned int sethi, ba, nop;
10580 +
10581 + err = get_user(sethi, (unsigned int *)regs->pc);
10582 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
10583 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
10584 +
10585 + if (err)
10586 + break;
10587 +
10588 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
10589 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
10590 + nop == 0x01000000U)
10591 + {
10592 + unsigned int addr, save, call;
10593 +
10594 + if ((ba & 0xFFC00000U) == 0x30800000U)
10595 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
10596 + else
10597 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
10598 +
10599 + err = get_user(save, (unsigned int *)addr);
10600 + err |= get_user(call, (unsigned int *)(addr+4));
10601 + err |= get_user(nop, (unsigned int *)(addr+8));
10602 + if (err)
10603 + break;
10604 +
10605 +#ifdef CONFIG_PAX_DLRESOLVE
10606 + if (save == 0x9DE3BFA8U &&
10607 + (call & 0xC0000000U) == 0x40000000U &&
10608 + nop == 0x01000000U)
10609 + {
10610 + struct vm_area_struct *vma;
10611 + unsigned long call_dl_resolve;
10612 +
10613 + down_read(&current->mm->mmap_sem);
10614 + call_dl_resolve = current->mm->call_dl_resolve;
10615 + up_read(&current->mm->mmap_sem);
10616 + if (likely(call_dl_resolve))
10617 + goto emulate;
10618 +
10619 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
10620 +
10621 + down_write(&current->mm->mmap_sem);
10622 + if (current->mm->call_dl_resolve) {
10623 + call_dl_resolve = current->mm->call_dl_resolve;
10624 + up_write(&current->mm->mmap_sem);
10625 + if (vma)
10626 + kmem_cache_free(vm_area_cachep, vma);
10627 + goto emulate;
10628 + }
10629 +
10630 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
10631 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
10632 + up_write(&current->mm->mmap_sem);
10633 + if (vma)
10634 + kmem_cache_free(vm_area_cachep, vma);
10635 + return 1;
10636 + }
10637 +
10638 + if (pax_insert_vma(vma, call_dl_resolve)) {
10639 + up_write(&current->mm->mmap_sem);
10640 + kmem_cache_free(vm_area_cachep, vma);
10641 + return 1;
10642 + }
10643 +
10644 + current->mm->call_dl_resolve = call_dl_resolve;
10645 + up_write(&current->mm->mmap_sem);
10646 +
10647 +emulate:
10648 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10649 + regs->pc = call_dl_resolve;
10650 + regs->npc = addr+4;
10651 + return 3;
10652 + }
10653 +#endif
10654 +
10655 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
10656 + if ((save & 0xFFC00000U) == 0x05000000U &&
10657 + (call & 0xFFFFE000U) == 0x85C0A000U &&
10658 + nop == 0x01000000U)
10659 + {
10660 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
10661 + regs->u_regs[UREG_G2] = addr + 4;
10662 + addr = (save & 0x003FFFFFU) << 10;
10663 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
10664 + regs->pc = addr;
10665 + regs->npc = addr+4;
10666 + return 3;
10667 + }
10668 + }
10669 + } while (0);
10670 +
10671 + do { /* PaX: unpatched PLT emulation step 2 */
10672 + unsigned int save, call, nop;
10673 +
10674 + err = get_user(save, (unsigned int *)(regs->pc-4));
10675 + err |= get_user(call, (unsigned int *)regs->pc);
10676 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
10677 + if (err)
10678 + break;
10679 +
10680 + if (save == 0x9DE3BFA8U &&
10681 + (call & 0xC0000000U) == 0x40000000U &&
10682 + nop == 0x01000000U)
10683 + {
10684 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
10685 +
10686 + regs->u_regs[UREG_RETPC] = regs->pc;
10687 + regs->pc = dl_resolve;
10688 + regs->npc = dl_resolve+4;
10689 + return 3;
10690 + }
10691 + } while (0);
10692 +#endif
10693 +
10694 + return 1;
10695 +}
10696 +
10697 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
10698 +{
10699 + unsigned long i;
10700 +
10701 + printk(KERN_ERR "PAX: bytes at PC: ");
10702 + for (i = 0; i < 8; i++) {
10703 + unsigned int c;
10704 + if (get_user(c, (unsigned int *)pc+i))
10705 + printk(KERN_CONT "???????? ");
10706 + else
10707 + printk(KERN_CONT "%08x ", c);
10708 + }
10709 + printk("\n");
10710 +}
10711 +#endif
10712 +
10713 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
10714 int text_fault)
10715 {
10716 @@ -229,6 +503,24 @@ good_area:
10717 if (!(vma->vm_flags & VM_WRITE))
10718 goto bad_area;
10719 } else {
10720 +
10721 +#ifdef CONFIG_PAX_PAGEEXEC
10722 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
10723 + up_read(&mm->mmap_sem);
10724 + switch (pax_handle_fetch_fault(regs)) {
10725 +
10726 +#ifdef CONFIG_PAX_EMUPLT
10727 + case 2:
10728 + case 3:
10729 + return;
10730 +#endif
10731 +
10732 + }
10733 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
10734 + do_group_exit(SIGKILL);
10735 + }
10736 +#endif
10737 +
10738 /* Allow reads even for write-only mappings */
10739 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
10740 goto bad_area;
10741 diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
10742 index 2ebec26..b212598 100644
10743 --- a/arch/sparc/mm/fault_64.c
10744 +++ b/arch/sparc/mm/fault_64.c
10745 @@ -21,6 +21,9 @@
10746 #include <linux/kprobes.h>
10747 #include <linux/kdebug.h>
10748 #include <linux/percpu.h>
10749 +#include <linux/slab.h>
10750 +#include <linux/pagemap.h>
10751 +#include <linux/compiler.h>
10752
10753 #include <asm/page.h>
10754 #include <asm/pgtable.h>
10755 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
10756 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
10757 regs->tpc);
10758 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
10759 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
10760 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
10761 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
10762 dump_stack();
10763 unhandled_fault(regs->tpc, current, regs);
10764 @@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
10765 show_regs(regs);
10766 }
10767
10768 +#ifdef CONFIG_PAX_PAGEEXEC
10769 +#ifdef CONFIG_PAX_DLRESOLVE
10770 +static void pax_emuplt_close(struct vm_area_struct *vma)
10771 +{
10772 + vma->vm_mm->call_dl_resolve = 0UL;
10773 +}
10774 +
10775 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
10776 +{
10777 + unsigned int *kaddr;
10778 +
10779 + vmf->page = alloc_page(GFP_HIGHUSER);
10780 + if (!vmf->page)
10781 + return VM_FAULT_OOM;
10782 +
10783 + kaddr = kmap(vmf->page);
10784 + memset(kaddr, 0, PAGE_SIZE);
10785 + kaddr[0] = 0x9DE3BFA8U; /* save */
10786 + flush_dcache_page(vmf->page);
10787 + kunmap(vmf->page);
10788 + return VM_FAULT_MAJOR;
10789 +}
10790 +
10791 +static const struct vm_operations_struct pax_vm_ops = {
10792 + .close = pax_emuplt_close,
10793 + .fault = pax_emuplt_fault
10794 +};
10795 +
10796 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
10797 +{
10798 + int ret;
10799 +
10800 + INIT_LIST_HEAD(&vma->anon_vma_chain);
10801 + vma->vm_mm = current->mm;
10802 + vma->vm_start = addr;
10803 + vma->vm_end = addr + PAGE_SIZE;
10804 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
10805 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
10806 + vma->vm_ops = &pax_vm_ops;
10807 +
10808 + ret = insert_vm_struct(current->mm, vma);
10809 + if (ret)
10810 + return ret;
10811 +
10812 + ++current->mm->total_vm;
10813 + return 0;
10814 +}
10815 +#endif
10816 +
10817 +/*
10818 + * PaX: decide what to do with offenders (regs->tpc = fault address)
10819 + *
10820 + * returns 1 when task should be killed
10821 + * 2 when patched PLT trampoline was detected
10822 + * 3 when unpatched PLT trampoline was detected
10823 + */
10824 +static int pax_handle_fetch_fault(struct pt_regs *regs)
10825 +{
10826 +
10827 +#ifdef CONFIG_PAX_EMUPLT
10828 + int err;
10829 +
10830 + do { /* PaX: patched PLT emulation #1 */
10831 + unsigned int sethi1, sethi2, jmpl;
10832 +
10833 + err = get_user(sethi1, (unsigned int *)regs->tpc);
10834 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
10835 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
10836 +
10837 + if (err)
10838 + break;
10839 +
10840 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
10841 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
10842 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
10843 + {
10844 + unsigned long addr;
10845 +
10846 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
10847 + addr = regs->u_regs[UREG_G1];
10848 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10849 +
10850 + if (test_thread_flag(TIF_32BIT))
10851 + addr &= 0xFFFFFFFFUL;
10852 +
10853 + regs->tpc = addr;
10854 + regs->tnpc = addr+4;
10855 + return 2;
10856 + }
10857 + } while (0);
10858 +
10859 + do { /* PaX: patched PLT emulation #2 */
10860 + unsigned int ba;
10861 +
10862 + err = get_user(ba, (unsigned int *)regs->tpc);
10863 +
10864 + if (err)
10865 + break;
10866 +
10867 + if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
10868 + unsigned long addr;
10869 +
10870 + if ((ba & 0xFFC00000U) == 0x30800000U)
10871 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
10872 + else
10873 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10874 +
10875 + if (test_thread_flag(TIF_32BIT))
10876 + addr &= 0xFFFFFFFFUL;
10877 +
10878 + regs->tpc = addr;
10879 + regs->tnpc = addr+4;
10880 + return 2;
10881 + }
10882 + } while (0);
10883 +
10884 + do { /* PaX: patched PLT emulation #3 */
10885 + unsigned int sethi, bajmpl, nop;
10886 +
10887 + err = get_user(sethi, (unsigned int *)regs->tpc);
10888 + err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
10889 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
10890 +
10891 + if (err)
10892 + break;
10893 +
10894 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
10895 + ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
10896 + nop == 0x01000000U)
10897 + {
10898 + unsigned long addr;
10899 +
10900 + addr = (sethi & 0x003FFFFFU) << 10;
10901 + regs->u_regs[UREG_G1] = addr;
10902 + if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
10903 + addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
10904 + else
10905 + addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
10906 +
10907 + if (test_thread_flag(TIF_32BIT))
10908 + addr &= 0xFFFFFFFFUL;
10909 +
10910 + regs->tpc = addr;
10911 + regs->tnpc = addr+4;
10912 + return 2;
10913 + }
10914 + } while (0);
10915 +
10916 + do { /* PaX: patched PLT emulation #4 */
10917 + unsigned int sethi, mov1, call, mov2;
10918 +
10919 + err = get_user(sethi, (unsigned int *)regs->tpc);
10920 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
10921 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
10922 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
10923 +
10924 + if (err)
10925 + break;
10926 +
10927 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
10928 + mov1 == 0x8210000FU &&
10929 + (call & 0xC0000000U) == 0x40000000U &&
10930 + mov2 == 0x9E100001U)
10931 + {
10932 + unsigned long addr;
10933 +
10934 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
10935 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
10936 +
10937 + if (test_thread_flag(TIF_32BIT))
10938 + addr &= 0xFFFFFFFFUL;
10939 +
10940 + regs->tpc = addr;
10941 + regs->tnpc = addr+4;
10942 + return 2;
10943 + }
10944 + } while (0);
10945 +
10946 + do { /* PaX: patched PLT emulation #5 */
10947 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
10948 +
10949 + err = get_user(sethi, (unsigned int *)regs->tpc);
10950 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
10951 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
10952 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
10953 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
10954 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
10955 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
10956 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
10957 +
10958 + if (err)
10959 + break;
10960 +
10961 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
10962 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
10963 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10964 + (or1 & 0xFFFFE000U) == 0x82106000U &&
10965 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
10966 + sllx == 0x83287020U &&
10967 + jmpl == 0x81C04005U &&
10968 + nop == 0x01000000U)
10969 + {
10970 + unsigned long addr;
10971 +
10972 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
10973 + regs->u_regs[UREG_G1] <<= 32;
10974 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
10975 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
10976 + regs->tpc = addr;
10977 + regs->tnpc = addr+4;
10978 + return 2;
10979 + }
10980 + } while (0);
10981 +
10982 + do { /* PaX: patched PLT emulation #6 */
10983 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
10984 +
10985 + err = get_user(sethi, (unsigned int *)regs->tpc);
10986 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
10987 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
10988 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
10989 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
10990 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
10991 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
10992 +
10993 + if (err)
10994 + break;
10995 +
10996 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
10997 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
10998 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
10999 + sllx == 0x83287020U &&
11000 + (or & 0xFFFFE000U) == 0x8A116000U &&
11001 + jmpl == 0x81C04005U &&
11002 + nop == 0x01000000U)
11003 + {
11004 + unsigned long addr;
11005 +
11006 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
11007 + regs->u_regs[UREG_G1] <<= 32;
11008 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
11009 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
11010 + regs->tpc = addr;
11011 + regs->tnpc = addr+4;
11012 + return 2;
11013 + }
11014 + } while (0);
11015 +
11016 + do { /* PaX: unpatched PLT emulation step 1 */
11017 + unsigned int sethi, ba, nop;
11018 +
11019 + err = get_user(sethi, (unsigned int *)regs->tpc);
11020 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11021 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11022 +
11023 + if (err)
11024 + break;
11025 +
11026 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
11027 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
11028 + nop == 0x01000000U)
11029 + {
11030 + unsigned long addr;
11031 + unsigned int save, call;
11032 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
11033 +
11034 + if ((ba & 0xFFC00000U) == 0x30800000U)
11035 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
11036 + else
11037 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11038 +
11039 + if (test_thread_flag(TIF_32BIT))
11040 + addr &= 0xFFFFFFFFUL;
11041 +
11042 + err = get_user(save, (unsigned int *)addr);
11043 + err |= get_user(call, (unsigned int *)(addr+4));
11044 + err |= get_user(nop, (unsigned int *)(addr+8));
11045 + if (err)
11046 + break;
11047 +
11048 +#ifdef CONFIG_PAX_DLRESOLVE
11049 + if (save == 0x9DE3BFA8U &&
11050 + (call & 0xC0000000U) == 0x40000000U &&
11051 + nop == 0x01000000U)
11052 + {
11053 + struct vm_area_struct *vma;
11054 + unsigned long call_dl_resolve;
11055 +
11056 + down_read(&current->mm->mmap_sem);
11057 + call_dl_resolve = current->mm->call_dl_resolve;
11058 + up_read(&current->mm->mmap_sem);
11059 + if (likely(call_dl_resolve))
11060 + goto emulate;
11061 +
11062 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
11063 +
11064 + down_write(&current->mm->mmap_sem);
11065 + if (current->mm->call_dl_resolve) {
11066 + call_dl_resolve = current->mm->call_dl_resolve;
11067 + up_write(&current->mm->mmap_sem);
11068 + if (vma)
11069 + kmem_cache_free(vm_area_cachep, vma);
11070 + goto emulate;
11071 + }
11072 +
11073 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
11074 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
11075 + up_write(&current->mm->mmap_sem);
11076 + if (vma)
11077 + kmem_cache_free(vm_area_cachep, vma);
11078 + return 1;
11079 + }
11080 +
11081 + if (pax_insert_vma(vma, call_dl_resolve)) {
11082 + up_write(&current->mm->mmap_sem);
11083 + kmem_cache_free(vm_area_cachep, vma);
11084 + return 1;
11085 + }
11086 +
11087 + current->mm->call_dl_resolve = call_dl_resolve;
11088 + up_write(&current->mm->mmap_sem);
11089 +
11090 +emulate:
11091 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11092 + regs->tpc = call_dl_resolve;
11093 + regs->tnpc = addr+4;
11094 + return 3;
11095 + }
11096 +#endif
11097 +
11098 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
11099 + if ((save & 0xFFC00000U) == 0x05000000U &&
11100 + (call & 0xFFFFE000U) == 0x85C0A000U &&
11101 + nop == 0x01000000U)
11102 + {
11103 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11104 + regs->u_regs[UREG_G2] = addr + 4;
11105 + addr = (save & 0x003FFFFFU) << 10;
11106 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
11107 +
11108 + if (test_thread_flag(TIF_32BIT))
11109 + addr &= 0xFFFFFFFFUL;
11110 +
11111 + regs->tpc = addr;
11112 + regs->tnpc = addr+4;
11113 + return 3;
11114 + }
11115 +
11116 + /* PaX: 64-bit PLT stub */
11117 + err = get_user(sethi1, (unsigned int *)addr);
11118 + err |= get_user(sethi2, (unsigned int *)(addr+4));
11119 + err |= get_user(or1, (unsigned int *)(addr+8));
11120 + err |= get_user(or2, (unsigned int *)(addr+12));
11121 + err |= get_user(sllx, (unsigned int *)(addr+16));
11122 + err |= get_user(add, (unsigned int *)(addr+20));
11123 + err |= get_user(jmpl, (unsigned int *)(addr+24));
11124 + err |= get_user(nop, (unsigned int *)(addr+28));
11125 + if (err)
11126 + break;
11127 +
11128 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
11129 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
11130 + (or1 & 0xFFFFE000U) == 0x88112000U &&
11131 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
11132 + sllx == 0x89293020U &&
11133 + add == 0x8A010005U &&
11134 + jmpl == 0x89C14000U &&
11135 + nop == 0x01000000U)
11136 + {
11137 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
11138 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
11139 + regs->u_regs[UREG_G4] <<= 32;
11140 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
11141 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
11142 + regs->u_regs[UREG_G4] = addr + 24;
11143 + addr = regs->u_regs[UREG_G5];
11144 + regs->tpc = addr;
11145 + regs->tnpc = addr+4;
11146 + return 3;
11147 + }
11148 + }
11149 + } while (0);
11150 +
11151 +#ifdef CONFIG_PAX_DLRESOLVE
11152 + do { /* PaX: unpatched PLT emulation step 2 */
11153 + unsigned int save, call, nop;
11154 +
11155 + err = get_user(save, (unsigned int *)(regs->tpc-4));
11156 + err |= get_user(call, (unsigned int *)regs->tpc);
11157 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
11158 + if (err)
11159 + break;
11160 +
11161 + if (save == 0x9DE3BFA8U &&
11162 + (call & 0xC0000000U) == 0x40000000U &&
11163 + nop == 0x01000000U)
11164 + {
11165 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
11166 +
11167 + if (test_thread_flag(TIF_32BIT))
11168 + dl_resolve &= 0xFFFFFFFFUL;
11169 +
11170 + regs->u_regs[UREG_RETPC] = regs->tpc;
11171 + regs->tpc = dl_resolve;
11172 + regs->tnpc = dl_resolve+4;
11173 + return 3;
11174 + }
11175 + } while (0);
11176 +#endif
11177 +
11178 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
11179 + unsigned int sethi, ba, nop;
11180 +
11181 + err = get_user(sethi, (unsigned int *)regs->tpc);
11182 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
11183 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
11184 +
11185 + if (err)
11186 + break;
11187 +
11188 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
11189 + (ba & 0xFFF00000U) == 0x30600000U &&
11190 + nop == 0x01000000U)
11191 + {
11192 + unsigned long addr;
11193 +
11194 + addr = (sethi & 0x003FFFFFU) << 10;
11195 + regs->u_regs[UREG_G1] = addr;
11196 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
11197 +
11198 + if (test_thread_flag(TIF_32BIT))
11199 + addr &= 0xFFFFFFFFUL;
11200 +
11201 + regs->tpc = addr;
11202 + regs->tnpc = addr+4;
11203 + return 2;
11204 + }
11205 + } while (0);
11206 +
11207 +#endif
11208 +
11209 + return 1;
11210 +}
11211 +
11212 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
11213 +{
11214 + unsigned long i;
11215 +
11216 + printk(KERN_ERR "PAX: bytes at PC: ");
11217 + for (i = 0; i < 8; i++) {
11218 + unsigned int c;
11219 + if (get_user(c, (unsigned int *)pc+i))
11220 + printk(KERN_CONT "???????? ");
11221 + else
11222 + printk(KERN_CONT "%08x ", c);
11223 + }
11224 + printk("\n");
11225 +}
11226 +#endif
11227 +
11228 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
11229 {
11230 struct mm_struct *mm = current->mm;
11231 @@ -342,6 +805,29 @@ retry:
11232 if (!vma)
11233 goto bad_area;
11234
11235 +#ifdef CONFIG_PAX_PAGEEXEC
11236 + /* PaX: detect ITLB misses on non-exec pages */
11237 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
11238 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
11239 + {
11240 + if (address != regs->tpc)
11241 + goto good_area;
11242 +
11243 + up_read(&mm->mmap_sem);
11244 + switch (pax_handle_fetch_fault(regs)) {
11245 +
11246 +#ifdef CONFIG_PAX_EMUPLT
11247 + case 2:
11248 + case 3:
11249 + return;
11250 +#endif
11251 +
11252 + }
11253 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
11254 + do_group_exit(SIGKILL);
11255 + }
11256 +#endif
11257 +
11258 /* Pure DTLB misses do not tell us whether the fault causing
11259 * load/store/atomic was a write or not, it only says that there
11260 * was no match. So in such a case we (carefully) read the
11261 diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
11262 index 9639964..806cd0c 100644
11263 --- a/arch/sparc/mm/hugetlbpage.c
11264 +++ b/arch/sparc/mm/hugetlbpage.c
11265 @@ -28,7 +28,8 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11266 unsigned long addr,
11267 unsigned long len,
11268 unsigned long pgoff,
11269 - unsigned long flags)
11270 + unsigned long flags,
11271 + unsigned long offset)
11272 {
11273 unsigned long task_size = TASK_SIZE;
11274 struct vm_unmapped_area_info info;
11275 @@ -38,15 +39,22 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
11276
11277 info.flags = 0;
11278 info.length = len;
11279 - info.low_limit = TASK_UNMAPPED_BASE;
11280 + info.low_limit = mm->mmap_base;
11281 info.high_limit = min(task_size, VA_EXCLUDE_START);
11282 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11283 info.align_offset = 0;
11284 + info.threadstack_offset = offset;
11285 addr = vm_unmapped_area(&info);
11286
11287 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
11288 VM_BUG_ON(addr != -ENOMEM);
11289 info.low_limit = VA_EXCLUDE_END;
11290 +
11291 +#ifdef CONFIG_PAX_RANDMMAP
11292 + if (mm->pax_flags & MF_PAX_RANDMMAP)
11293 + info.low_limit += mm->delta_mmap;
11294 +#endif
11295 +
11296 info.high_limit = task_size;
11297 addr = vm_unmapped_area(&info);
11298 }
11299 @@ -58,7 +66,8 @@ static unsigned long
11300 hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11301 const unsigned long len,
11302 const unsigned long pgoff,
11303 - const unsigned long flags)
11304 + const unsigned long flags,
11305 + const unsigned long offset)
11306 {
11307 struct mm_struct *mm = current->mm;
11308 unsigned long addr = addr0;
11309 @@ -73,6 +82,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11310 info.high_limit = mm->mmap_base;
11311 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
11312 info.align_offset = 0;
11313 + info.threadstack_offset = offset;
11314 addr = vm_unmapped_area(&info);
11315
11316 /*
11317 @@ -85,6 +95,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
11318 VM_BUG_ON(addr != -ENOMEM);
11319 info.flags = 0;
11320 info.low_limit = TASK_UNMAPPED_BASE;
11321 +
11322 +#ifdef CONFIG_PAX_RANDMMAP
11323 + if (mm->pax_flags & MF_PAX_RANDMMAP)
11324 + info.low_limit += mm->delta_mmap;
11325 +#endif
11326 +
11327 info.high_limit = STACK_TOP32;
11328 addr = vm_unmapped_area(&info);
11329 }
11330 @@ -99,6 +115,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11331 struct mm_struct *mm = current->mm;
11332 struct vm_area_struct *vma;
11333 unsigned long task_size = TASK_SIZE;
11334 + unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
11335
11336 if (test_thread_flag(TIF_32BIT))
11337 task_size = STACK_TOP32;
11338 @@ -114,19 +131,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
11339 return addr;
11340 }
11341
11342 +#ifdef CONFIG_PAX_RANDMMAP
11343 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
11344 +#endif
11345 +
11346 if (addr) {
11347 addr = ALIGN(addr, HPAGE_SIZE);
11348 vma = find_vma(mm, addr);
11349 - if (task_size - len >= addr &&
11350 - (!vma || addr + len <= vma->vm_start))
11351 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
11352 return addr;
11353 }
11354 if (mm->get_unmapped_area == arch_get_unmapped_area)
11355 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
11356 - pgoff, flags);
11357 + pgoff, flags, offset);
11358 else
11359 return hugetlb_get_unmapped_area_topdown(file, addr, len,
11360 - pgoff, flags);
11361 + pgoff, flags, offset);
11362 }
11363
11364 pte_t *huge_pte_alloc(struct mm_struct *mm,
11365 diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
11366 index ed82eda..0d80e77 100644
11367 --- a/arch/sparc/mm/init_64.c
11368 +++ b/arch/sparc/mm/init_64.c
11369 @@ -188,9 +188,9 @@ unsigned long sparc64_kern_sec_context __read_mostly;
11370 int num_kernel_image_mappings;
11371
11372 #ifdef CONFIG_DEBUG_DCFLUSH
11373 -atomic_t dcpage_flushes = ATOMIC_INIT(0);
11374 +atomic_unchecked_t dcpage_flushes = ATOMIC_INIT(0);
11375 #ifdef CONFIG_SMP
11376 -atomic_t dcpage_flushes_xcall = ATOMIC_INIT(0);
11377 +atomic_unchecked_t dcpage_flushes_xcall = ATOMIC_INIT(0);
11378 #endif
11379 #endif
11380
11381 @@ -198,7 +198,7 @@ inline void flush_dcache_page_impl(struct page *page)
11382 {
11383 BUG_ON(tlb_type == hypervisor);
11384 #ifdef CONFIG_DEBUG_DCFLUSH
11385 - atomic_inc(&dcpage_flushes);
11386 + atomic_inc_unchecked(&dcpage_flushes);
11387 #endif
11388
11389 #ifdef DCACHE_ALIASING_POSSIBLE
11390 @@ -466,10 +466,10 @@ void mmu_info(struct seq_file *m)
11391
11392 #ifdef CONFIG_DEBUG_DCFLUSH
11393 seq_printf(m, "DCPageFlushes\t: %d\n",
11394 - atomic_read(&dcpage_flushes));
11395 + atomic_read_unchecked(&dcpage_flushes));
11396 #ifdef CONFIG_SMP
11397 seq_printf(m, "DCPageFlushesXC\t: %d\n",
11398 - atomic_read(&dcpage_flushes_xcall));
11399 + atomic_read_unchecked(&dcpage_flushes_xcall));
11400 #endif /* CONFIG_SMP */
11401 #endif /* CONFIG_DEBUG_DCFLUSH */
11402 }
11403 diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
11404 index d45a2c4..3c05a78 100644
11405 --- a/arch/tile/Kconfig
11406 +++ b/arch/tile/Kconfig
11407 @@ -185,6 +185,7 @@ source "kernel/Kconfig.hz"
11408
11409 config KEXEC
11410 bool "kexec system call"
11411 + depends on !GRKERNSEC_KMEM
11412 ---help---
11413 kexec is a system call that implements the ability to shutdown your
11414 current kernel, and to start another kernel. It is like a reboot
11415 diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
11416 index ad220ee..2f537b3 100644
11417 --- a/arch/tile/include/asm/atomic_64.h
11418 +++ b/arch/tile/include/asm/atomic_64.h
11419 @@ -105,6 +105,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
11420
11421 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
11422
11423 +#define atomic64_read_unchecked(v) atomic64_read(v)
11424 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
11425 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
11426 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
11427 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
11428 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
11429 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
11430 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
11431 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
11432 +
11433 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
11434 #define smp_mb__before_atomic_dec() smp_mb()
11435 #define smp_mb__after_atomic_dec() smp_mb()
11436 diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
11437 index 6160761..00cac88 100644
11438 --- a/arch/tile/include/asm/cache.h
11439 +++ b/arch/tile/include/asm/cache.h
11440 @@ -15,11 +15,12 @@
11441 #ifndef _ASM_TILE_CACHE_H
11442 #define _ASM_TILE_CACHE_H
11443
11444 +#include <linux/const.h>
11445 #include <arch/chip.h>
11446
11447 /* bytes per L1 data cache line */
11448 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
11449 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11450 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11451
11452 /* bytes per L2 cache line */
11453 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
11454 diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
11455 index b6cde32..c0cb736 100644
11456 --- a/arch/tile/include/asm/uaccess.h
11457 +++ b/arch/tile/include/asm/uaccess.h
11458 @@ -414,9 +414,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
11459 const void __user *from,
11460 unsigned long n)
11461 {
11462 - int sz = __compiletime_object_size(to);
11463 + size_t sz = __compiletime_object_size(to);
11464
11465 - if (likely(sz == -1 || sz >= n))
11466 + if (likely(sz == (size_t)-1 || sz >= n))
11467 n = _copy_from_user(to, from, n);
11468 else
11469 copy_from_user_overflow();
11470 diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
11471 index 0cb3bba..7338b2d 100644
11472 --- a/arch/tile/mm/hugetlbpage.c
11473 +++ b/arch/tile/mm/hugetlbpage.c
11474 @@ -212,6 +212,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
11475 info.high_limit = TASK_SIZE;
11476 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
11477 info.align_offset = 0;
11478 + info.threadstack_offset = 0;
11479 return vm_unmapped_area(&info);
11480 }
11481
11482 @@ -229,6 +230,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
11483 info.high_limit = current->mm->mmap_base;
11484 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
11485 info.align_offset = 0;
11486 + info.threadstack_offset = 0;
11487 addr = vm_unmapped_area(&info);
11488
11489 /*
11490 diff --git a/arch/um/Makefile b/arch/um/Makefile
11491 index 133f7de..1d6f2f1 100644
11492 --- a/arch/um/Makefile
11493 +++ b/arch/um/Makefile
11494 @@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
11495 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
11496 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
11497
11498 +ifdef CONSTIFY_PLUGIN
11499 +USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11500 +endif
11501 +
11502 #This will adjust *FLAGS accordingly to the platform.
11503 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
11504
11505 diff --git a/arch/um/defconfig b/arch/um/defconfig
11506 index 2665e6b..3e3822b 100644
11507 --- a/arch/um/defconfig
11508 +++ b/arch/um/defconfig
11509 @@ -51,7 +51,6 @@ CONFIG_X86_CMPXCHG=y
11510 CONFIG_X86_L1_CACHE_SHIFT=5
11511 CONFIG_X86_XADD=y
11512 CONFIG_X86_PPRO_FENCE=y
11513 -CONFIG_X86_WP_WORKS_OK=y
11514 CONFIG_X86_INVLPG=y
11515 CONFIG_X86_BSWAP=y
11516 CONFIG_X86_POPAD_OK=y
11517 diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
11518 index 19e1bdd..3665b77 100644
11519 --- a/arch/um/include/asm/cache.h
11520 +++ b/arch/um/include/asm/cache.h
11521 @@ -1,6 +1,7 @@
11522 #ifndef __UM_CACHE_H
11523 #define __UM_CACHE_H
11524
11525 +#include <linux/const.h>
11526
11527 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
11528 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
11529 @@ -12,6 +13,6 @@
11530 # define L1_CACHE_SHIFT 5
11531 #endif
11532
11533 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11534 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11535
11536 #endif
11537 diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
11538 index 2e0a6b1..a64d0f5 100644
11539 --- a/arch/um/include/asm/kmap_types.h
11540 +++ b/arch/um/include/asm/kmap_types.h
11541 @@ -8,6 +8,6 @@
11542
11543 /* No more #include "asm/arch/kmap_types.h" ! */
11544
11545 -#define KM_TYPE_NR 14
11546 +#define KM_TYPE_NR 15
11547
11548 #endif
11549 diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
11550 index 5ff53d9..5850cdf 100644
11551 --- a/arch/um/include/asm/page.h
11552 +++ b/arch/um/include/asm/page.h
11553 @@ -14,6 +14,9 @@
11554 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
11555 #define PAGE_MASK (~(PAGE_SIZE-1))
11556
11557 +#define ktla_ktva(addr) (addr)
11558 +#define ktva_ktla(addr) (addr)
11559 +
11560 #ifndef __ASSEMBLY__
11561
11562 struct page;
11563 diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
11564 index 0032f92..cd151e0 100644
11565 --- a/arch/um/include/asm/pgtable-3level.h
11566 +++ b/arch/um/include/asm/pgtable-3level.h
11567 @@ -58,6 +58,7 @@
11568 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
11569 #define pud_populate(mm, pud, pmd) \
11570 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
11571 +#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
11572
11573 #ifdef CONFIG_64BIT
11574 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
11575 diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
11576 index bbcef52..6a2a483 100644
11577 --- a/arch/um/kernel/process.c
11578 +++ b/arch/um/kernel/process.c
11579 @@ -367,22 +367,6 @@ int singlestepping(void * t)
11580 return 2;
11581 }
11582
11583 -/*
11584 - * Only x86 and x86_64 have an arch_align_stack().
11585 - * All other arches have "#define arch_align_stack(x) (x)"
11586 - * in their asm/system.h
11587 - * As this is included in UML from asm-um/system-generic.h,
11588 - * we can use it to behave as the subarch does.
11589 - */
11590 -#ifndef arch_align_stack
11591 -unsigned long arch_align_stack(unsigned long sp)
11592 -{
11593 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
11594 - sp -= get_random_int() % 8192;
11595 - return sp & ~0xf;
11596 -}
11597 -#endif
11598 -
11599 unsigned long get_wchan(struct task_struct *p)
11600 {
11601 unsigned long stack_page, sp, ip;
11602 diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
11603 index ad8f795..2c7eec6 100644
11604 --- a/arch/unicore32/include/asm/cache.h
11605 +++ b/arch/unicore32/include/asm/cache.h
11606 @@ -12,8 +12,10 @@
11607 #ifndef __UNICORE_CACHE_H__
11608 #define __UNICORE_CACHE_H__
11609
11610 -#define L1_CACHE_SHIFT (5)
11611 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
11612 +#include <linux/const.h>
11613 +
11614 +#define L1_CACHE_SHIFT 5
11615 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
11616
11617 /*
11618 * Memory returned by kmalloc() may be used for DMA, so we must make
11619 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
11620 index f67e839..bfd4748 100644
11621 --- a/arch/x86/Kconfig
11622 +++ b/arch/x86/Kconfig
11623 @@ -247,7 +247,7 @@ config X86_HT
11624
11625 config X86_32_LAZY_GS
11626 def_bool y
11627 - depends on X86_32 && !CC_STACKPROTECTOR
11628 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11629
11630 config ARCH_HWEIGHT_CFLAGS
11631 string
11632 @@ -1099,6 +1099,7 @@ config MICROCODE_EARLY
11633
11634 config X86_MSR
11635 tristate "/dev/cpu/*/msr - Model-specific register support"
11636 + depends on !GRKERNSEC_KMEM
11637 ---help---
11638 This device gives privileged processes access to the x86
11639 Model-Specific Registers (MSRs). It is a character device with
11640 @@ -1122,7 +1123,7 @@ choice
11641
11642 config NOHIGHMEM
11643 bool "off"
11644 - depends on !X86_NUMAQ
11645 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11646 ---help---
11647 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11648 However, the address space of 32-bit x86 processors is only 4
11649 @@ -1159,7 +1160,7 @@ config NOHIGHMEM
11650
11651 config HIGHMEM4G
11652 bool "4GB"
11653 - depends on !X86_NUMAQ
11654 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11655 ---help---
11656 Select this if you have a 32-bit processor and between 1 and 4
11657 gigabytes of physical RAM.
11658 @@ -1212,7 +1213,7 @@ config PAGE_OFFSET
11659 hex
11660 default 0xB0000000 if VMSPLIT_3G_OPT
11661 default 0x80000000 if VMSPLIT_2G
11662 - default 0x78000000 if VMSPLIT_2G_OPT
11663 + default 0x70000000 if VMSPLIT_2G_OPT
11664 default 0x40000000 if VMSPLIT_1G
11665 default 0xC0000000
11666 depends on X86_32
11667 @@ -1614,6 +1615,7 @@ config SECCOMP
11668
11669 config CC_STACKPROTECTOR
11670 bool "Enable -fstack-protector buffer overflow detection"
11671 + depends on X86_64 || !PAX_MEMORY_UDEREF
11672 ---help---
11673 This option turns on the -fstack-protector GCC feature. This
11674 feature puts, at the beginning of functions, a canary value on
11675 @@ -1632,6 +1634,7 @@ source kernel/Kconfig.hz
11676
11677 config KEXEC
11678 bool "kexec system call"
11679 + depends on !GRKERNSEC_KMEM
11680 ---help---
11681 kexec is a system call that implements the ability to shutdown your
11682 current kernel, and to start another kernel. It is like a reboot
11683 @@ -1733,6 +1736,8 @@ config X86_NEED_RELOCS
11684 config PHYSICAL_ALIGN
11685 hex "Alignment value to which kernel should be aligned"
11686 default "0x1000000"
11687 + range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
11688 + range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
11689 range 0x2000 0x1000000 if X86_32
11690 range 0x200000 0x1000000 if X86_64
11691 ---help---
11692 @@ -1812,9 +1817,10 @@ config DEBUG_HOTPLUG_CPU0
11693 If unsure, say N.
11694
11695 config COMPAT_VDSO
11696 - def_bool y
11697 + def_bool n
11698 prompt "Compat VDSO support"
11699 depends on X86_32 || IA32_EMULATION
11700 + depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
11701 ---help---
11702 Map the 32-bit VDSO to the predictable old-style address too.
11703
11704 diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
11705 index c026cca..14657ae 100644
11706 --- a/arch/x86/Kconfig.cpu
11707 +++ b/arch/x86/Kconfig.cpu
11708 @@ -319,7 +319,7 @@ config X86_PPRO_FENCE
11709
11710 config X86_F00F_BUG
11711 def_bool y
11712 - depends on M586MMX || M586TSC || M586 || M486
11713 + depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
11714
11715 config X86_INVD_BUG
11716 def_bool y
11717 @@ -327,7 +327,7 @@ config X86_INVD_BUG
11718
11719 config X86_ALIGNMENT_16
11720 def_bool y
11721 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11722 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11723
11724 config X86_INTEL_USERCOPY
11725 def_bool y
11726 @@ -373,7 +373,7 @@ config X86_CMPXCHG64
11727 # generates cmov.
11728 config X86_CMOV
11729 def_bool y
11730 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
11731 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
11732
11733 config X86_MINIMUM_CPU_FAMILY
11734 int
11735 diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
11736 index 78d91af..8ceb94b 100644
11737 --- a/arch/x86/Kconfig.debug
11738 +++ b/arch/x86/Kconfig.debug
11739 @@ -74,7 +74,7 @@ config X86_PTDUMP
11740 config DEBUG_RODATA
11741 bool "Write protect kernel read-only data structures"
11742 default y
11743 - depends on DEBUG_KERNEL
11744 + depends on DEBUG_KERNEL && BROKEN
11745 ---help---
11746 Mark the kernel read-only data as write-protected in the pagetables,
11747 in order to catch accidental (and incorrect) writes to such const
11748 @@ -92,7 +92,7 @@ config DEBUG_RODATA_TEST
11749
11750 config DEBUG_SET_MODULE_RONX
11751 bool "Set loadable kernel module data as NX and text as RO"
11752 - depends on MODULES
11753 + depends on MODULES && BROKEN
11754 ---help---
11755 This option helps catch unintended modifications to loadable
11756 kernel module's text and read-only data. It also prevents execution
11757 diff --git a/arch/x86/Makefile b/arch/x86/Makefile
11758 index 57d0215..b4373fb 100644
11759 --- a/arch/x86/Makefile
11760 +++ b/arch/x86/Makefile
11761 @@ -49,14 +49,12 @@ ifeq ($(CONFIG_X86_32),y)
11762 # CPU-specific tuning. Anything which can be shared with UML should go here.
11763 include $(srctree)/arch/x86/Makefile_32.cpu
11764 KBUILD_CFLAGS += $(cflags-y)
11765 -
11766 - # temporary until string.h is fixed
11767 - KBUILD_CFLAGS += -ffreestanding
11768 else
11769 BITS := 64
11770 UTS_MACHINE := x86_64
11771 CHECKFLAGS += -D__x86_64__ -m64
11772
11773 + biarch := $(call cc-option,-m64)
11774 KBUILD_AFLAGS += -m64
11775 KBUILD_CFLAGS += -m64
11776
11777 @@ -89,6 +87,9 @@ else
11778 KBUILD_CFLAGS += -maccumulate-outgoing-args
11779 endif
11780
11781 +# temporary until string.h is fixed
11782 +KBUILD_CFLAGS += -ffreestanding
11783 +
11784 ifdef CONFIG_CC_STACKPROTECTOR
11785 cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
11786 ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
11787 @@ -247,3 +248,12 @@ define archhelp
11788 echo ' FDINITRD=file initrd for the booted kernel'
11789 echo ' kvmconfig - Enable additional options for guest kernel support'
11790 endef
11791 +
11792 +define OLD_LD
11793 +
11794 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
11795 +*** Please upgrade your binutils to 2.18 or newer
11796 +endef
11797 +
11798 +archprepare:
11799 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
11800 diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
11801 index 6cf0111..f2e2398 100644
11802 --- a/arch/x86/boot/Makefile
11803 +++ b/arch/x86/boot/Makefile
11804 @@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
11805 $(call cc-option, -fno-unit-at-a-time)) \
11806 $(call cc-option, -fno-stack-protector) \
11807 $(call cc-option, -mpreferred-stack-boundary=2)
11808 +ifdef CONSTIFY_PLUGIN
11809 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11810 +endif
11811 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11812 GCOV_PROFILE := n
11813
11814 diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
11815 index 878e4b9..20537ab 100644
11816 --- a/arch/x86/boot/bitops.h
11817 +++ b/arch/x86/boot/bitops.h
11818 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
11819 u8 v;
11820 const u32 *p = (const u32 *)addr;
11821
11822 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
11823 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
11824 return v;
11825 }
11826
11827 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
11828
11829 static inline void set_bit(int nr, void *addr)
11830 {
11831 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
11832 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
11833 }
11834
11835 #endif /* BOOT_BITOPS_H */
11836 diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
11837 index ef72bae..353a184 100644
11838 --- a/arch/x86/boot/boot.h
11839 +++ b/arch/x86/boot/boot.h
11840 @@ -85,7 +85,7 @@ static inline void io_delay(void)
11841 static inline u16 ds(void)
11842 {
11843 u16 seg;
11844 - asm("movw %%ds,%0" : "=rm" (seg));
11845 + asm volatile("movw %%ds,%0" : "=rm" (seg));
11846 return seg;
11847 }
11848
11849 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
11850 static inline int memcmp(const void *s1, const void *s2, size_t len)
11851 {
11852 u8 diff;
11853 - asm("repe; cmpsb; setnz %0"
11854 + asm volatile("repe; cmpsb; setnz %0"
11855 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
11856 return diff;
11857 }
11858 diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
11859 index c8a6792..2402765 100644
11860 --- a/arch/x86/boot/compressed/Makefile
11861 +++ b/arch/x86/boot/compressed/Makefile
11862 @@ -16,6 +16,9 @@ KBUILD_CFLAGS += $(cflags-y)
11863 KBUILD_CFLAGS += -mno-mmx -mno-sse
11864 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
11865 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
11866 +ifdef CONSTIFY_PLUGIN
11867 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
11868 +endif
11869
11870 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11871 GCOV_PROFILE := n
11872 diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
11873 index b7388a4..03844ec 100644
11874 --- a/arch/x86/boot/compressed/eboot.c
11875 +++ b/arch/x86/boot/compressed/eboot.c
11876 @@ -150,7 +150,6 @@ again:
11877 *addr = max_addr;
11878 }
11879
11880 -free_pool:
11881 efi_call_phys1(sys_table->boottime->free_pool, map);
11882
11883 fail:
11884 @@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
11885 if (i == map_size / desc_size)
11886 status = EFI_NOT_FOUND;
11887
11888 -free_pool:
11889 efi_call_phys1(sys_table->boottime->free_pool, map);
11890 fail:
11891 return status;
11892 diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
11893 index a53440e..c3dbf1e 100644
11894 --- a/arch/x86/boot/compressed/efi_stub_32.S
11895 +++ b/arch/x86/boot/compressed/efi_stub_32.S
11896 @@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
11897 * parameter 2, ..., param n. To make things easy, we save the return
11898 * address of efi_call_phys in a global variable.
11899 */
11900 - popl %ecx
11901 - movl %ecx, saved_return_addr(%edx)
11902 - /* get the function pointer into ECX*/
11903 - popl %ecx
11904 - movl %ecx, efi_rt_function_ptr(%edx)
11905 + popl saved_return_addr(%edx)
11906 + popl efi_rt_function_ptr(%edx)
11907
11908 /*
11909 * 3. Call the physical function.
11910 */
11911 - call *%ecx
11912 + call *efi_rt_function_ptr(%edx)
11913
11914 /*
11915 * 4. Balance the stack. And because EAX contain the return value,
11916 @@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
11917 1: popl %edx
11918 subl $1b, %edx
11919
11920 - movl efi_rt_function_ptr(%edx), %ecx
11921 - pushl %ecx
11922 + pushl efi_rt_function_ptr(%edx)
11923
11924 /*
11925 * 10. Push the saved return address onto the stack and return.
11926 */
11927 - movl saved_return_addr(%edx), %ecx
11928 - pushl %ecx
11929 - ret
11930 + jmpl *saved_return_addr(%edx)
11931 ENDPROC(efi_call_phys)
11932 .previous
11933
11934 diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
11935 index 5d6f689..9d06730 100644
11936 --- a/arch/x86/boot/compressed/head_32.S
11937 +++ b/arch/x86/boot/compressed/head_32.S
11938 @@ -118,7 +118,7 @@ preferred_addr:
11939 notl %eax
11940 andl %eax, %ebx
11941 #else
11942 - movl $LOAD_PHYSICAL_ADDR, %ebx
11943 + movl $____LOAD_PHYSICAL_ADDR, %ebx
11944 #endif
11945
11946 /* Target address to relocate to for decompression */
11947 diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
11948 index c337422..2c5be72 100644
11949 --- a/arch/x86/boot/compressed/head_64.S
11950 +++ b/arch/x86/boot/compressed/head_64.S
11951 @@ -95,7 +95,7 @@ ENTRY(startup_32)
11952 notl %eax
11953 andl %eax, %ebx
11954 #else
11955 - movl $LOAD_PHYSICAL_ADDR, %ebx
11956 + movl $____LOAD_PHYSICAL_ADDR, %ebx
11957 #endif
11958
11959 /* Target address to relocate to for decompression */
11960 @@ -270,7 +270,7 @@ preferred_addr:
11961 notq %rax
11962 andq %rax, %rbp
11963 #else
11964 - movq $LOAD_PHYSICAL_ADDR, %rbp
11965 + movq $____LOAD_PHYSICAL_ADDR, %rbp
11966 #endif
11967
11968 /* Target address to relocate to for decompression */
11969 @@ -362,8 +362,8 @@ gdt:
11970 .long gdt
11971 .word 0
11972 .quad 0x0000000000000000 /* NULL descriptor */
11973 - .quad 0x00af9a000000ffff /* __KERNEL_CS */
11974 - .quad 0x00cf92000000ffff /* __KERNEL_DS */
11975 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
11976 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
11977 .quad 0x0080890000000000 /* TS descriptor */
11978 .quad 0x0000000000000000 /* TS continued */
11979 gdt_end:
11980 diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
11981 index 434f077..b6b4b38 100644
11982 --- a/arch/x86/boot/compressed/misc.c
11983 +++ b/arch/x86/boot/compressed/misc.c
11984 @@ -283,7 +283,7 @@ static void handle_relocations(void *output, unsigned long output_len)
11985 * Calculate the delta between where vmlinux was linked to load
11986 * and where it was actually loaded.
11987 */
11988 - delta = min_addr - LOAD_PHYSICAL_ADDR;
11989 + delta = min_addr - ____LOAD_PHYSICAL_ADDR;
11990 if (!delta) {
11991 debug_putstr("No relocation needed... ");
11992 return;
11993 @@ -380,7 +380,7 @@ static void parse_elf(void *output)
11994 case PT_LOAD:
11995 #ifdef CONFIG_RELOCATABLE
11996 dest = output;
11997 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
11998 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
11999 #else
12000 dest = (void *)(phdr->p_paddr);
12001 #endif
12002 @@ -432,7 +432,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
12003 error("Destination address too large");
12004 #endif
12005 #ifndef CONFIG_RELOCATABLE
12006 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
12007 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
12008 error("Wrong destination address");
12009 #endif
12010
12011 diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
12012 index 4d3ff03..e4972ff 100644
12013 --- a/arch/x86/boot/cpucheck.c
12014 +++ b/arch/x86/boot/cpucheck.c
12015 @@ -74,7 +74,7 @@ static int has_fpu(void)
12016 u16 fcw = -1, fsw = -1;
12017 u32 cr0;
12018
12019 - asm("movl %%cr0,%0" : "=r" (cr0));
12020 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
12021 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
12022 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
12023 asm volatile("movl %0,%%cr0" : : "r" (cr0));
12024 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
12025 {
12026 u32 f0, f1;
12027
12028 - asm("pushfl ; "
12029 + asm volatile("pushfl ; "
12030 "pushfl ; "
12031 "popl %0 ; "
12032 "movl %0,%1 ; "
12033 @@ -115,7 +115,7 @@ static void get_flags(void)
12034 set_bit(X86_FEATURE_FPU, cpu.flags);
12035
12036 if (has_eflag(X86_EFLAGS_ID)) {
12037 - asm("cpuid"
12038 + asm volatile("cpuid"
12039 : "=a" (max_intel_level),
12040 "=b" (cpu_vendor[0]),
12041 "=d" (cpu_vendor[1]),
12042 @@ -124,7 +124,7 @@ static void get_flags(void)
12043
12044 if (max_intel_level >= 0x00000001 &&
12045 max_intel_level <= 0x0000ffff) {
12046 - asm("cpuid"
12047 + asm volatile("cpuid"
12048 : "=a" (tfms),
12049 "=c" (cpu.flags[4]),
12050 "=d" (cpu.flags[0])
12051 @@ -136,7 +136,7 @@ static void get_flags(void)
12052 cpu.model += ((tfms >> 16) & 0xf) << 4;
12053 }
12054
12055 - asm("cpuid"
12056 + asm volatile("cpuid"
12057 : "=a" (max_amd_level)
12058 : "a" (0x80000000)
12059 : "ebx", "ecx", "edx");
12060 @@ -144,7 +144,7 @@ static void get_flags(void)
12061 if (max_amd_level >= 0x80000001 &&
12062 max_amd_level <= 0x8000ffff) {
12063 u32 eax = 0x80000001;
12064 - asm("cpuid"
12065 + asm volatile("cpuid"
12066 : "+a" (eax),
12067 "=c" (cpu.flags[6]),
12068 "=d" (cpu.flags[1])
12069 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12070 u32 ecx = MSR_K7_HWCR;
12071 u32 eax, edx;
12072
12073 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12074 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12075 eax &= ~(1 << 15);
12076 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12077 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12078
12079 get_flags(); /* Make sure it really did something */
12080 err = check_flags();
12081 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12082 u32 ecx = MSR_VIA_FCR;
12083 u32 eax, edx;
12084
12085 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12086 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12087 eax |= (1<<1)|(1<<7);
12088 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12089 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12090
12091 set_bit(X86_FEATURE_CX8, cpu.flags);
12092 err = check_flags();
12093 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
12094 u32 eax, edx;
12095 u32 level = 1;
12096
12097 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12098 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12099 - asm("cpuid"
12100 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
12101 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
12102 + asm volatile("cpuid"
12103 : "+a" (level), "=d" (cpu.flags[0])
12104 : : "ecx", "ebx");
12105 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12106 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
12107
12108 err = check_flags();
12109 }
12110 diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
12111 index 9ec06a1..2c25e79 100644
12112 --- a/arch/x86/boot/header.S
12113 +++ b/arch/x86/boot/header.S
12114 @@ -409,10 +409,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
12115 # single linked list of
12116 # struct setup_data
12117
12118 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
12119 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
12120
12121 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
12122 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
12123 +#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
12124 +#else
12125 #define VO_INIT_SIZE (VO__end - VO__text)
12126 +#endif
12127 #if ZO_INIT_SIZE > VO_INIT_SIZE
12128 #define INIT_SIZE ZO_INIT_SIZE
12129 #else
12130 diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
12131 index db75d07..8e6d0af 100644
12132 --- a/arch/x86/boot/memory.c
12133 +++ b/arch/x86/boot/memory.c
12134 @@ -19,7 +19,7 @@
12135
12136 static int detect_memory_e820(void)
12137 {
12138 - int count = 0;
12139 + unsigned int count = 0;
12140 struct biosregs ireg, oreg;
12141 struct e820entry *desc = boot_params.e820_map;
12142 static struct e820entry buf; /* static so it is zeroed */
12143 diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
12144 index 11e8c6e..fdbb1ed 100644
12145 --- a/arch/x86/boot/video-vesa.c
12146 +++ b/arch/x86/boot/video-vesa.c
12147 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
12148
12149 boot_params.screen_info.vesapm_seg = oreg.es;
12150 boot_params.screen_info.vesapm_off = oreg.di;
12151 + boot_params.screen_info.vesapm_size = oreg.cx;
12152 }
12153
12154 /*
12155 diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
12156 index 43eda28..5ab5fdb 100644
12157 --- a/arch/x86/boot/video.c
12158 +++ b/arch/x86/boot/video.c
12159 @@ -96,7 +96,7 @@ static void store_mode_params(void)
12160 static unsigned int get_entry(void)
12161 {
12162 char entry_buf[4];
12163 - int i, len = 0;
12164 + unsigned int i, len = 0;
12165 int key;
12166 unsigned int v;
12167
12168 diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
12169 index 9105655..41779c1 100644
12170 --- a/arch/x86/crypto/aes-x86_64-asm_64.S
12171 +++ b/arch/x86/crypto/aes-x86_64-asm_64.S
12172 @@ -8,6 +8,8 @@
12173 * including this sentence is retained in full.
12174 */
12175
12176 +#include <asm/alternative-asm.h>
12177 +
12178 .extern crypto_ft_tab
12179 .extern crypto_it_tab
12180 .extern crypto_fl_tab
12181 @@ -70,6 +72,8 @@
12182 je B192; \
12183 leaq 32(r9),r9;
12184
12185 +#define ret pax_force_retaddr; ret
12186 +
12187 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
12188 movq r1,r2; \
12189 movq r3,r4; \
12190 diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
12191 index 477e9d7..c92c7d8 100644
12192 --- a/arch/x86/crypto/aesni-intel_asm.S
12193 +++ b/arch/x86/crypto/aesni-intel_asm.S
12194 @@ -31,6 +31,7 @@
12195
12196 #include <linux/linkage.h>
12197 #include <asm/inst.h>
12198 +#include <asm/alternative-asm.h>
12199
12200 #ifdef __x86_64__
12201 .data
12202 @@ -205,7 +206,7 @@ enc: .octa 0x2
12203 * num_initial_blocks = b mod 4
12204 * encrypt the initial num_initial_blocks blocks and apply ghash on
12205 * the ciphertext
12206 -* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12207 +* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12208 * are clobbered
12209 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
12210 */
12211 @@ -214,8 +215,8 @@ enc: .octa 0x2
12212 .macro INITIAL_BLOCKS_DEC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
12213 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
12214 mov arg7, %r10 # %r10 = AAD
12215 - mov arg8, %r12 # %r12 = aadLen
12216 - mov %r12, %r11
12217 + mov arg8, %r15 # %r15 = aadLen
12218 + mov %r15, %r11
12219 pxor %xmm\i, %xmm\i
12220 _get_AAD_loop\num_initial_blocks\operation:
12221 movd (%r10), \TMP1
12222 @@ -223,15 +224,15 @@ _get_AAD_loop\num_initial_blocks\operation:
12223 psrldq $4, %xmm\i
12224 pxor \TMP1, %xmm\i
12225 add $4, %r10
12226 - sub $4, %r12
12227 + sub $4, %r15
12228 jne _get_AAD_loop\num_initial_blocks\operation
12229 cmp $16, %r11
12230 je _get_AAD_loop2_done\num_initial_blocks\operation
12231 - mov $16, %r12
12232 + mov $16, %r15
12233 _get_AAD_loop2\num_initial_blocks\operation:
12234 psrldq $4, %xmm\i
12235 - sub $4, %r12
12236 - cmp %r11, %r12
12237 + sub $4, %r15
12238 + cmp %r11, %r15
12239 jne _get_AAD_loop2\num_initial_blocks\operation
12240 _get_AAD_loop2_done\num_initial_blocks\operation:
12241 movdqa SHUF_MASK(%rip), %xmm14
12242 @@ -443,7 +444,7 @@ _initial_blocks_done\num_initial_blocks\operation:
12243 * num_initial_blocks = b mod 4
12244 * encrypt the initial num_initial_blocks blocks and apply ghash on
12245 * the ciphertext
12246 -* %r10, %r11, %r12, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12247 +* %r10, %r11, %r15, %rax, %xmm5, %xmm6, %xmm7, %xmm8, %xmm9 registers
12248 * are clobbered
12249 * arg1, %arg2, %arg3, %r14 are used as a pointer only, not modified
12250 */
12251 @@ -452,8 +453,8 @@ _initial_blocks_done\num_initial_blocks\operation:
12252 .macro INITIAL_BLOCKS_ENC num_initial_blocks TMP1 TMP2 TMP3 TMP4 TMP5 XMM0 XMM1 \
12253 XMM2 XMM3 XMM4 XMMDst TMP6 TMP7 i i_seq operation
12254 mov arg7, %r10 # %r10 = AAD
12255 - mov arg8, %r12 # %r12 = aadLen
12256 - mov %r12, %r11
12257 + mov arg8, %r15 # %r15 = aadLen
12258 + mov %r15, %r11
12259 pxor %xmm\i, %xmm\i
12260 _get_AAD_loop\num_initial_blocks\operation:
12261 movd (%r10), \TMP1
12262 @@ -461,15 +462,15 @@ _get_AAD_loop\num_initial_blocks\operation:
12263 psrldq $4, %xmm\i
12264 pxor \TMP1, %xmm\i
12265 add $4, %r10
12266 - sub $4, %r12
12267 + sub $4, %r15
12268 jne _get_AAD_loop\num_initial_blocks\operation
12269 cmp $16, %r11
12270 je _get_AAD_loop2_done\num_initial_blocks\operation
12271 - mov $16, %r12
12272 + mov $16, %r15
12273 _get_AAD_loop2\num_initial_blocks\operation:
12274 psrldq $4, %xmm\i
12275 - sub $4, %r12
12276 - cmp %r11, %r12
12277 + sub $4, %r15
12278 + cmp %r11, %r15
12279 jne _get_AAD_loop2\num_initial_blocks\operation
12280 _get_AAD_loop2_done\num_initial_blocks\operation:
12281 movdqa SHUF_MASK(%rip), %xmm14
12282 @@ -1269,7 +1270,7 @@ TMP7 XMM1 XMM2 XMM3 XMM4 XMMDst
12283 *
12284 *****************************************************************************/
12285 ENTRY(aesni_gcm_dec)
12286 - push %r12
12287 + push %r15
12288 push %r13
12289 push %r14
12290 mov %rsp, %r14
12291 @@ -1279,8 +1280,8 @@ ENTRY(aesni_gcm_dec)
12292 */
12293 sub $VARIABLE_OFFSET, %rsp
12294 and $~63, %rsp # align rsp to 64 bytes
12295 - mov %arg6, %r12
12296 - movdqu (%r12), %xmm13 # %xmm13 = HashKey
12297 + mov %arg6, %r15
12298 + movdqu (%r15), %xmm13 # %xmm13 = HashKey
12299 movdqa SHUF_MASK(%rip), %xmm2
12300 PSHUFB_XMM %xmm2, %xmm13
12301
12302 @@ -1308,10 +1309,10 @@ ENTRY(aesni_gcm_dec)
12303 movdqa %xmm13, HashKey(%rsp) # store HashKey<<1 (mod poly)
12304 mov %arg4, %r13 # save the number of bytes of plaintext/ciphertext
12305 and $-16, %r13 # %r13 = %r13 - (%r13 mod 16)
12306 - mov %r13, %r12
12307 - and $(3<<4), %r12
12308 + mov %r13, %r15
12309 + and $(3<<4), %r15
12310 jz _initial_num_blocks_is_0_decrypt
12311 - cmp $(2<<4), %r12
12312 + cmp $(2<<4), %r15
12313 jb _initial_num_blocks_is_1_decrypt
12314 je _initial_num_blocks_is_2_decrypt
12315 _initial_num_blocks_is_3_decrypt:
12316 @@ -1361,16 +1362,16 @@ _zero_cipher_left_decrypt:
12317 sub $16, %r11
12318 add %r13, %r11
12319 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte block
12320 - lea SHIFT_MASK+16(%rip), %r12
12321 - sub %r13, %r12
12322 + lea SHIFT_MASK+16(%rip), %r15
12323 + sub %r13, %r15
12324 # adjust the shuffle mask pointer to be able to shift 16-%r13 bytes
12325 # (%r13 is the number of bytes in plaintext mod 16)
12326 - movdqu (%r12), %xmm2 # get the appropriate shuffle mask
12327 + movdqu (%r15), %xmm2 # get the appropriate shuffle mask
12328 PSHUFB_XMM %xmm2, %xmm1 # right shift 16-%r13 butes
12329
12330 movdqa %xmm1, %xmm2
12331 pxor %xmm1, %xmm0 # Ciphertext XOR E(K, Yn)
12332 - movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
12333 + movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
12334 # get the appropriate mask to mask out top 16-%r13 bytes of %xmm0
12335 pand %xmm1, %xmm0 # mask out top 16-%r13 bytes of %xmm0
12336 pand %xmm1, %xmm2
12337 @@ -1399,9 +1400,9 @@ _less_than_8_bytes_left_decrypt:
12338 sub $1, %r13
12339 jne _less_than_8_bytes_left_decrypt
12340 _multiple_of_16_bytes_decrypt:
12341 - mov arg8, %r12 # %r13 = aadLen (number of bytes)
12342 - shl $3, %r12 # convert into number of bits
12343 - movd %r12d, %xmm15 # len(A) in %xmm15
12344 + mov arg8, %r15 # %r13 = aadLen (number of bytes)
12345 + shl $3, %r15 # convert into number of bits
12346 + movd %r15d, %xmm15 # len(A) in %xmm15
12347 shl $3, %arg4 # len(C) in bits (*128)
12348 MOVQ_R64_XMM %arg4, %xmm1
12349 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
12350 @@ -1440,7 +1441,8 @@ _return_T_done_decrypt:
12351 mov %r14, %rsp
12352 pop %r14
12353 pop %r13
12354 - pop %r12
12355 + pop %r15
12356 + pax_force_retaddr
12357 ret
12358 ENDPROC(aesni_gcm_dec)
12359
12360 @@ -1529,7 +1531,7 @@ ENDPROC(aesni_gcm_dec)
12361 * poly = x^128 + x^127 + x^126 + x^121 + 1
12362 ***************************************************************************/
12363 ENTRY(aesni_gcm_enc)
12364 - push %r12
12365 + push %r15
12366 push %r13
12367 push %r14
12368 mov %rsp, %r14
12369 @@ -1539,8 +1541,8 @@ ENTRY(aesni_gcm_enc)
12370 #
12371 sub $VARIABLE_OFFSET, %rsp
12372 and $~63, %rsp
12373 - mov %arg6, %r12
12374 - movdqu (%r12), %xmm13
12375 + mov %arg6, %r15
12376 + movdqu (%r15), %xmm13
12377 movdqa SHUF_MASK(%rip), %xmm2
12378 PSHUFB_XMM %xmm2, %xmm13
12379
12380 @@ -1564,13 +1566,13 @@ ENTRY(aesni_gcm_enc)
12381 movdqa %xmm13, HashKey(%rsp)
12382 mov %arg4, %r13 # %xmm13 holds HashKey<<1 (mod poly)
12383 and $-16, %r13
12384 - mov %r13, %r12
12385 + mov %r13, %r15
12386
12387 # Encrypt first few blocks
12388
12389 - and $(3<<4), %r12
12390 + and $(3<<4), %r15
12391 jz _initial_num_blocks_is_0_encrypt
12392 - cmp $(2<<4), %r12
12393 + cmp $(2<<4), %r15
12394 jb _initial_num_blocks_is_1_encrypt
12395 je _initial_num_blocks_is_2_encrypt
12396 _initial_num_blocks_is_3_encrypt:
12397 @@ -1623,14 +1625,14 @@ _zero_cipher_left_encrypt:
12398 sub $16, %r11
12399 add %r13, %r11
12400 movdqu (%arg3,%r11,1), %xmm1 # receive the last <16 byte blocks
12401 - lea SHIFT_MASK+16(%rip), %r12
12402 - sub %r13, %r12
12403 + lea SHIFT_MASK+16(%rip), %r15
12404 + sub %r13, %r15
12405 # adjust the shuffle mask pointer to be able to shift 16-r13 bytes
12406 # (%r13 is the number of bytes in plaintext mod 16)
12407 - movdqu (%r12), %xmm2 # get the appropriate shuffle mask
12408 + movdqu (%r15), %xmm2 # get the appropriate shuffle mask
12409 PSHUFB_XMM %xmm2, %xmm1 # shift right 16-r13 byte
12410 pxor %xmm1, %xmm0 # Plaintext XOR Encrypt(K, Yn)
12411 - movdqu ALL_F-SHIFT_MASK(%r12), %xmm1
12412 + movdqu ALL_F-SHIFT_MASK(%r15), %xmm1
12413 # get the appropriate mask to mask out top 16-r13 bytes of xmm0
12414 pand %xmm1, %xmm0 # mask out top 16-r13 bytes of xmm0
12415 movdqa SHUF_MASK(%rip), %xmm10
12416 @@ -1663,9 +1665,9 @@ _less_than_8_bytes_left_encrypt:
12417 sub $1, %r13
12418 jne _less_than_8_bytes_left_encrypt
12419 _multiple_of_16_bytes_encrypt:
12420 - mov arg8, %r12 # %r12 = addLen (number of bytes)
12421 - shl $3, %r12
12422 - movd %r12d, %xmm15 # len(A) in %xmm15
12423 + mov arg8, %r15 # %r15 = addLen (number of bytes)
12424 + shl $3, %r15
12425 + movd %r15d, %xmm15 # len(A) in %xmm15
12426 shl $3, %arg4 # len(C) in bits (*128)
12427 MOVQ_R64_XMM %arg4, %xmm1
12428 pslldq $8, %xmm15 # %xmm15 = len(A)||0x0000000000000000
12429 @@ -1704,7 +1706,8 @@ _return_T_done_encrypt:
12430 mov %r14, %rsp
12431 pop %r14
12432 pop %r13
12433 - pop %r12
12434 + pop %r15
12435 + pax_force_retaddr
12436 ret
12437 ENDPROC(aesni_gcm_enc)
12438
12439 @@ -1722,6 +1725,7 @@ _key_expansion_256a:
12440 pxor %xmm1, %xmm0
12441 movaps %xmm0, (TKEYP)
12442 add $0x10, TKEYP
12443 + pax_force_retaddr
12444 ret
12445 ENDPROC(_key_expansion_128)
12446 ENDPROC(_key_expansion_256a)
12447 @@ -1748,6 +1752,7 @@ _key_expansion_192a:
12448 shufps $0b01001110, %xmm2, %xmm1
12449 movaps %xmm1, 0x10(TKEYP)
12450 add $0x20, TKEYP
12451 + pax_force_retaddr
12452 ret
12453 ENDPROC(_key_expansion_192a)
12454
12455 @@ -1768,6 +1773,7 @@ _key_expansion_192b:
12456
12457 movaps %xmm0, (TKEYP)
12458 add $0x10, TKEYP
12459 + pax_force_retaddr
12460 ret
12461 ENDPROC(_key_expansion_192b)
12462
12463 @@ -1781,6 +1787,7 @@ _key_expansion_256b:
12464 pxor %xmm1, %xmm2
12465 movaps %xmm2, (TKEYP)
12466 add $0x10, TKEYP
12467 + pax_force_retaddr
12468 ret
12469 ENDPROC(_key_expansion_256b)
12470
12471 @@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
12472 #ifndef __x86_64__
12473 popl KEYP
12474 #endif
12475 + pax_force_retaddr
12476 ret
12477 ENDPROC(aesni_set_key)
12478
12479 @@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
12480 popl KLEN
12481 popl KEYP
12482 #endif
12483 + pax_force_retaddr
12484 ret
12485 ENDPROC(aesni_enc)
12486
12487 @@ -1974,6 +1983,7 @@ _aesni_enc1:
12488 AESENC KEY STATE
12489 movaps 0x70(TKEYP), KEY
12490 AESENCLAST KEY STATE
12491 + pax_force_retaddr
12492 ret
12493 ENDPROC(_aesni_enc1)
12494
12495 @@ -2083,6 +2093,7 @@ _aesni_enc4:
12496 AESENCLAST KEY STATE2
12497 AESENCLAST KEY STATE3
12498 AESENCLAST KEY STATE4
12499 + pax_force_retaddr
12500 ret
12501 ENDPROC(_aesni_enc4)
12502
12503 @@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
12504 popl KLEN
12505 popl KEYP
12506 #endif
12507 + pax_force_retaddr
12508 ret
12509 ENDPROC(aesni_dec)
12510
12511 @@ -2164,6 +2176,7 @@ _aesni_dec1:
12512 AESDEC KEY STATE
12513 movaps 0x70(TKEYP), KEY
12514 AESDECLAST KEY STATE
12515 + pax_force_retaddr
12516 ret
12517 ENDPROC(_aesni_dec1)
12518
12519 @@ -2273,6 +2286,7 @@ _aesni_dec4:
12520 AESDECLAST KEY STATE2
12521 AESDECLAST KEY STATE3
12522 AESDECLAST KEY STATE4
12523 + pax_force_retaddr
12524 ret
12525 ENDPROC(_aesni_dec4)
12526
12527 @@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
12528 popl KEYP
12529 popl LEN
12530 #endif
12531 + pax_force_retaddr
12532 ret
12533 ENDPROC(aesni_ecb_enc)
12534
12535 @@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
12536 popl KEYP
12537 popl LEN
12538 #endif
12539 + pax_force_retaddr
12540 ret
12541 ENDPROC(aesni_ecb_dec)
12542
12543 @@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
12544 popl LEN
12545 popl IVP
12546 #endif
12547 + pax_force_retaddr
12548 ret
12549 ENDPROC(aesni_cbc_enc)
12550
12551 @@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
12552 popl LEN
12553 popl IVP
12554 #endif
12555 + pax_force_retaddr
12556 ret
12557 ENDPROC(aesni_cbc_dec)
12558
12559 @@ -2550,6 +2568,7 @@ _aesni_inc_init:
12560 mov $1, TCTR_LOW
12561 MOVQ_R64_XMM TCTR_LOW INC
12562 MOVQ_R64_XMM CTR TCTR_LOW
12563 + pax_force_retaddr
12564 ret
12565 ENDPROC(_aesni_inc_init)
12566
12567 @@ -2579,6 +2598,7 @@ _aesni_inc:
12568 .Linc_low:
12569 movaps CTR, IV
12570 PSHUFB_XMM BSWAP_MASK IV
12571 + pax_force_retaddr
12572 ret
12573 ENDPROC(_aesni_inc)
12574
12575 @@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
12576 .Lctr_enc_ret:
12577 movups IV, (IVP)
12578 .Lctr_enc_just_ret:
12579 + pax_force_retaddr
12580 ret
12581 ENDPROC(aesni_ctr_enc)
12582
12583 @@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
12584 pxor INC, STATE4
12585 movdqu STATE4, 0x70(OUTP)
12586
12587 + pax_force_retaddr
12588 ret
12589 ENDPROC(aesni_xts_crypt8)
12590
12591 diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
12592 index 246c670..466e2d6 100644
12593 --- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
12594 +++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
12595 @@ -21,6 +21,7 @@
12596 */
12597
12598 #include <linux/linkage.h>
12599 +#include <asm/alternative-asm.h>
12600
12601 .file "blowfish-x86_64-asm.S"
12602 .text
12603 @@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
12604 jnz .L__enc_xor;
12605
12606 write_block();
12607 + pax_force_retaddr
12608 ret;
12609 .L__enc_xor:
12610 xor_block();
12611 + pax_force_retaddr
12612 ret;
12613 ENDPROC(__blowfish_enc_blk)
12614
12615 @@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
12616
12617 movq %r11, %rbp;
12618
12619 + pax_force_retaddr
12620 ret;
12621 ENDPROC(blowfish_dec_blk)
12622
12623 @@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
12624
12625 popq %rbx;
12626 popq %rbp;
12627 + pax_force_retaddr
12628 ret;
12629
12630 .L__enc_xor4:
12631 @@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
12632
12633 popq %rbx;
12634 popq %rbp;
12635 + pax_force_retaddr
12636 ret;
12637 ENDPROC(__blowfish_enc_blk_4way)
12638
12639 @@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
12640 popq %rbx;
12641 popq %rbp;
12642
12643 + pax_force_retaddr
12644 ret;
12645 ENDPROC(blowfish_dec_blk_4way)
12646 diff --git a/arch/x86/crypto/camellia-aesni-avx-asm_64.S b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12647 index ce71f92..1dce7ec 100644
12648 --- a/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12649 +++ b/arch/x86/crypto/camellia-aesni-avx-asm_64.S
12650 @@ -16,6 +16,7 @@
12651 */
12652
12653 #include <linux/linkage.h>
12654 +#include <asm/alternative-asm.h>
12655
12656 #define CAMELLIA_TABLE_BYTE_LEN 272
12657
12658 @@ -191,6 +192,7 @@ roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
12659 roundsm16(%xmm0, %xmm1, %xmm2, %xmm3, %xmm4, %xmm5, %xmm6, %xmm7,
12660 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14, %xmm15,
12661 %rcx, (%r9));
12662 + pax_force_retaddr
12663 ret;
12664 ENDPROC(roundsm16_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
12665
12666 @@ -199,6 +201,7 @@ roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
12667 roundsm16(%xmm4, %xmm5, %xmm6, %xmm7, %xmm0, %xmm1, %xmm2, %xmm3,
12668 %xmm12, %xmm13, %xmm14, %xmm15, %xmm8, %xmm9, %xmm10, %xmm11,
12669 %rax, (%r9));
12670 + pax_force_retaddr
12671 ret;
12672 ENDPROC(roundsm16_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
12673
12674 @@ -780,6 +783,7 @@ __camellia_enc_blk16:
12675 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
12676 %xmm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 16(%rax));
12677
12678 + pax_force_retaddr
12679 ret;
12680
12681 .align 8
12682 @@ -865,6 +869,7 @@ __camellia_dec_blk16:
12683 %xmm8, %xmm9, %xmm10, %xmm11, %xmm12, %xmm13, %xmm14,
12684 %xmm15, (key_table)(CTX), (%rax), 1 * 16(%rax));
12685
12686 + pax_force_retaddr
12687 ret;
12688
12689 .align 8
12690 @@ -904,6 +909,7 @@ ENTRY(camellia_ecb_enc_16way)
12691 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12692 %xmm8, %rsi);
12693
12694 + pax_force_retaddr
12695 ret;
12696 ENDPROC(camellia_ecb_enc_16way)
12697
12698 @@ -932,6 +938,7 @@ ENTRY(camellia_ecb_dec_16way)
12699 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12700 %xmm8, %rsi);
12701
12702 + pax_force_retaddr
12703 ret;
12704 ENDPROC(camellia_ecb_dec_16way)
12705
12706 @@ -981,6 +988,7 @@ ENTRY(camellia_cbc_dec_16way)
12707 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12708 %xmm8, %rsi);
12709
12710 + pax_force_retaddr
12711 ret;
12712 ENDPROC(camellia_cbc_dec_16way)
12713
12714 @@ -1092,6 +1100,7 @@ ENTRY(camellia_ctr_16way)
12715 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12716 %xmm8, %rsi);
12717
12718 + pax_force_retaddr
12719 ret;
12720 ENDPROC(camellia_ctr_16way)
12721
12722 @@ -1234,6 +1243,7 @@ camellia_xts_crypt_16way:
12723 %xmm15, %xmm14, %xmm13, %xmm12, %xmm11, %xmm10, %xmm9,
12724 %xmm8, %rsi);
12725
12726 + pax_force_retaddr
12727 ret;
12728 ENDPROC(camellia_xts_crypt_16way)
12729
12730 diff --git a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12731 index 0e0b886..5a3123c 100644
12732 --- a/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12733 +++ b/arch/x86/crypto/camellia-aesni-avx2-asm_64.S
12734 @@ -11,6 +11,7 @@
12735 */
12736
12737 #include <linux/linkage.h>
12738 +#include <asm/alternative-asm.h>
12739
12740 #define CAMELLIA_TABLE_BYTE_LEN 272
12741
12742 @@ -230,6 +231,7 @@ roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd:
12743 roundsm32(%ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5, %ymm6, %ymm7,
12744 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14, %ymm15,
12745 %rcx, (%r9));
12746 + pax_force_retaddr
12747 ret;
12748 ENDPROC(roundsm32_x0_x1_x2_x3_x4_x5_x6_x7_y0_y1_y2_y3_y4_y5_y6_y7_cd)
12749
12750 @@ -238,6 +240,7 @@ roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab:
12751 roundsm32(%ymm4, %ymm5, %ymm6, %ymm7, %ymm0, %ymm1, %ymm2, %ymm3,
12752 %ymm12, %ymm13, %ymm14, %ymm15, %ymm8, %ymm9, %ymm10, %ymm11,
12753 %rax, (%r9));
12754 + pax_force_retaddr
12755 ret;
12756 ENDPROC(roundsm32_x4_x5_x6_x7_x0_x1_x2_x3_y4_y5_y6_y7_y0_y1_y2_y3_ab)
12757
12758 @@ -820,6 +823,7 @@ __camellia_enc_blk32:
12759 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
12760 %ymm15, (key_table)(CTX, %r8, 8), (%rax), 1 * 32(%rax));
12761
12762 + pax_force_retaddr
12763 ret;
12764
12765 .align 8
12766 @@ -905,6 +909,7 @@ __camellia_dec_blk32:
12767 %ymm8, %ymm9, %ymm10, %ymm11, %ymm12, %ymm13, %ymm14,
12768 %ymm15, (key_table)(CTX), (%rax), 1 * 32(%rax));
12769
12770 + pax_force_retaddr
12771 ret;
12772
12773 .align 8
12774 @@ -948,6 +953,7 @@ ENTRY(camellia_ecb_enc_32way)
12775
12776 vzeroupper;
12777
12778 + pax_force_retaddr
12779 ret;
12780 ENDPROC(camellia_ecb_enc_32way)
12781
12782 @@ -980,6 +986,7 @@ ENTRY(camellia_ecb_dec_32way)
12783
12784 vzeroupper;
12785
12786 + pax_force_retaddr
12787 ret;
12788 ENDPROC(camellia_ecb_dec_32way)
12789
12790 @@ -1046,6 +1053,7 @@ ENTRY(camellia_cbc_dec_32way)
12791
12792 vzeroupper;
12793
12794 + pax_force_retaddr
12795 ret;
12796 ENDPROC(camellia_cbc_dec_32way)
12797
12798 @@ -1184,6 +1192,7 @@ ENTRY(camellia_ctr_32way)
12799
12800 vzeroupper;
12801
12802 + pax_force_retaddr
12803 ret;
12804 ENDPROC(camellia_ctr_32way)
12805
12806 @@ -1349,6 +1358,7 @@ camellia_xts_crypt_32way:
12807
12808 vzeroupper;
12809
12810 + pax_force_retaddr
12811 ret;
12812 ENDPROC(camellia_xts_crypt_32way)
12813
12814 diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
12815 index 310319c..db3d7b5 100644
12816 --- a/arch/x86/crypto/camellia-x86_64-asm_64.S
12817 +++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
12818 @@ -21,6 +21,7 @@
12819 */
12820
12821 #include <linux/linkage.h>
12822 +#include <asm/alternative-asm.h>
12823
12824 .file "camellia-x86_64-asm_64.S"
12825 .text
12826 @@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
12827 enc_outunpack(mov, RT1);
12828
12829 movq RRBP, %rbp;
12830 + pax_force_retaddr
12831 ret;
12832
12833 .L__enc_xor:
12834 enc_outunpack(xor, RT1);
12835
12836 movq RRBP, %rbp;
12837 + pax_force_retaddr
12838 ret;
12839 ENDPROC(__camellia_enc_blk)
12840
12841 @@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
12842 dec_outunpack();
12843
12844 movq RRBP, %rbp;
12845 + pax_force_retaddr
12846 ret;
12847 ENDPROC(camellia_dec_blk)
12848
12849 @@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
12850
12851 movq RRBP, %rbp;
12852 popq %rbx;
12853 + pax_force_retaddr
12854 ret;
12855
12856 .L__enc2_xor:
12857 @@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
12858
12859 movq RRBP, %rbp;
12860 popq %rbx;
12861 + pax_force_retaddr
12862 ret;
12863 ENDPROC(__camellia_enc_blk_2way)
12864
12865 @@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
12866
12867 movq RRBP, %rbp;
12868 movq RXOR, %rbx;
12869 + pax_force_retaddr
12870 ret;
12871 ENDPROC(camellia_dec_blk_2way)
12872 diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12873 index c35fd5d..2d8c7db 100644
12874 --- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12875 +++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
12876 @@ -24,6 +24,7 @@
12877 */
12878
12879 #include <linux/linkage.h>
12880 +#include <asm/alternative-asm.h>
12881
12882 .file "cast5-avx-x86_64-asm_64.S"
12883
12884 @@ -281,6 +282,7 @@ __cast5_enc_blk16:
12885 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
12886 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
12887
12888 + pax_force_retaddr
12889 ret;
12890 ENDPROC(__cast5_enc_blk16)
12891
12892 @@ -352,6 +354,7 @@ __cast5_dec_blk16:
12893 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
12894 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
12895
12896 + pax_force_retaddr
12897 ret;
12898
12899 .L__skip_dec:
12900 @@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
12901 vmovdqu RR4, (6*4*4)(%r11);
12902 vmovdqu RL4, (7*4*4)(%r11);
12903
12904 + pax_force_retaddr
12905 ret;
12906 ENDPROC(cast5_ecb_enc_16way)
12907
12908 @@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
12909 vmovdqu RR4, (6*4*4)(%r11);
12910 vmovdqu RL4, (7*4*4)(%r11);
12911
12912 + pax_force_retaddr
12913 ret;
12914 ENDPROC(cast5_ecb_dec_16way)
12915
12916 @@ -430,10 +435,10 @@ ENTRY(cast5_cbc_dec_16way)
12917 * %rdx: src
12918 */
12919
12920 - pushq %r12;
12921 + pushq %r14;
12922
12923 movq %rsi, %r11;
12924 - movq %rdx, %r12;
12925 + movq %rdx, %r14;
12926
12927 vmovdqu (0*16)(%rdx), RL1;
12928 vmovdqu (1*16)(%rdx), RR1;
12929 @@ -447,16 +452,16 @@ ENTRY(cast5_cbc_dec_16way)
12930 call __cast5_dec_blk16;
12931
12932 /* xor with src */
12933 - vmovq (%r12), RX;
12934 + vmovq (%r14), RX;
12935 vpshufd $0x4f, RX, RX;
12936 vpxor RX, RR1, RR1;
12937 - vpxor 0*16+8(%r12), RL1, RL1;
12938 - vpxor 1*16+8(%r12), RR2, RR2;
12939 - vpxor 2*16+8(%r12), RL2, RL2;
12940 - vpxor 3*16+8(%r12), RR3, RR3;
12941 - vpxor 4*16+8(%r12), RL3, RL3;
12942 - vpxor 5*16+8(%r12), RR4, RR4;
12943 - vpxor 6*16+8(%r12), RL4, RL4;
12944 + vpxor 0*16+8(%r14), RL1, RL1;
12945 + vpxor 1*16+8(%r14), RR2, RR2;
12946 + vpxor 2*16+8(%r14), RL2, RL2;
12947 + vpxor 3*16+8(%r14), RR3, RR3;
12948 + vpxor 4*16+8(%r14), RL3, RL3;
12949 + vpxor 5*16+8(%r14), RR4, RR4;
12950 + vpxor 6*16+8(%r14), RL4, RL4;
12951
12952 vmovdqu RR1, (0*16)(%r11);
12953 vmovdqu RL1, (1*16)(%r11);
12954 @@ -467,8 +472,9 @@ ENTRY(cast5_cbc_dec_16way)
12955 vmovdqu RR4, (6*16)(%r11);
12956 vmovdqu RL4, (7*16)(%r11);
12957
12958 - popq %r12;
12959 + popq %r14;
12960
12961 + pax_force_retaddr
12962 ret;
12963 ENDPROC(cast5_cbc_dec_16way)
12964
12965 @@ -480,10 +486,10 @@ ENTRY(cast5_ctr_16way)
12966 * %rcx: iv (big endian, 64bit)
12967 */
12968
12969 - pushq %r12;
12970 + pushq %r14;
12971
12972 movq %rsi, %r11;
12973 - movq %rdx, %r12;
12974 + movq %rdx, %r14;
12975
12976 vpcmpeqd RTMP, RTMP, RTMP;
12977 vpsrldq $8, RTMP, RTMP; /* low: -1, high: 0 */
12978 @@ -523,14 +529,14 @@ ENTRY(cast5_ctr_16way)
12979 call __cast5_enc_blk16;
12980
12981 /* dst = src ^ iv */
12982 - vpxor (0*16)(%r12), RR1, RR1;
12983 - vpxor (1*16)(%r12), RL1, RL1;
12984 - vpxor (2*16)(%r12), RR2, RR2;
12985 - vpxor (3*16)(%r12), RL2, RL2;
12986 - vpxor (4*16)(%r12), RR3, RR3;
12987 - vpxor (5*16)(%r12), RL3, RL3;
12988 - vpxor (6*16)(%r12), RR4, RR4;
12989 - vpxor (7*16)(%r12), RL4, RL4;
12990 + vpxor (0*16)(%r14), RR1, RR1;
12991 + vpxor (1*16)(%r14), RL1, RL1;
12992 + vpxor (2*16)(%r14), RR2, RR2;
12993 + vpxor (3*16)(%r14), RL2, RL2;
12994 + vpxor (4*16)(%r14), RR3, RR3;
12995 + vpxor (5*16)(%r14), RL3, RL3;
12996 + vpxor (6*16)(%r14), RR4, RR4;
12997 + vpxor (7*16)(%r14), RL4, RL4;
12998 vmovdqu RR1, (0*16)(%r11);
12999 vmovdqu RL1, (1*16)(%r11);
13000 vmovdqu RR2, (2*16)(%r11);
13001 @@ -540,7 +546,8 @@ ENTRY(cast5_ctr_16way)
13002 vmovdqu RR4, (6*16)(%r11);
13003 vmovdqu RL4, (7*16)(%r11);
13004
13005 - popq %r12;
13006 + popq %r14;
13007
13008 + pax_force_retaddr
13009 ret;
13010 ENDPROC(cast5_ctr_16way)
13011 diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13012 index e3531f8..e123f35 100644
13013 --- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13014 +++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
13015 @@ -24,6 +24,7 @@
13016 */
13017
13018 #include <linux/linkage.h>
13019 +#include <asm/alternative-asm.h>
13020 #include "glue_helper-asm-avx.S"
13021
13022 .file "cast6-avx-x86_64-asm_64.S"
13023 @@ -295,6 +296,7 @@ __cast6_enc_blk8:
13024 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13025 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13026
13027 + pax_force_retaddr
13028 ret;
13029 ENDPROC(__cast6_enc_blk8)
13030
13031 @@ -340,6 +342,7 @@ __cast6_dec_blk8:
13032 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
13033 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
13034
13035 + pax_force_retaddr
13036 ret;
13037 ENDPROC(__cast6_dec_blk8)
13038
13039 @@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
13040
13041 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13042
13043 + pax_force_retaddr
13044 ret;
13045 ENDPROC(cast6_ecb_enc_8way)
13046
13047 @@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
13048
13049 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13050
13051 + pax_force_retaddr
13052 ret;
13053 ENDPROC(cast6_ecb_dec_8way)
13054
13055 @@ -386,19 +391,20 @@ ENTRY(cast6_cbc_dec_8way)
13056 * %rdx: src
13057 */
13058
13059 - pushq %r12;
13060 + pushq %r14;
13061
13062 movq %rsi, %r11;
13063 - movq %rdx, %r12;
13064 + movq %rdx, %r14;
13065
13066 load_8way(%rdx, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13067
13068 call __cast6_dec_blk8;
13069
13070 - store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13071 + store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13072
13073 - popq %r12;
13074 + popq %r14;
13075
13076 + pax_force_retaddr
13077 ret;
13078 ENDPROC(cast6_cbc_dec_8way)
13079
13080 @@ -410,20 +416,21 @@ ENTRY(cast6_ctr_8way)
13081 * %rcx: iv (little endian, 128bit)
13082 */
13083
13084 - pushq %r12;
13085 + pushq %r14;
13086
13087 movq %rsi, %r11;
13088 - movq %rdx, %r12;
13089 + movq %rdx, %r14;
13090
13091 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13092 RD2, RX, RKR, RKM);
13093
13094 call __cast6_enc_blk8;
13095
13096 - store_ctr_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13097 + store_ctr_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13098
13099 - popq %r12;
13100 + popq %r14;
13101
13102 + pax_force_retaddr
13103 ret;
13104 ENDPROC(cast6_ctr_8way)
13105
13106 @@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
13107 /* dst <= regs xor IVs(in dst) */
13108 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13109
13110 + pax_force_retaddr
13111 ret;
13112 ENDPROC(cast6_xts_enc_8way)
13113
13114 @@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
13115 /* dst <= regs xor IVs(in dst) */
13116 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13117
13118 + pax_force_retaddr
13119 ret;
13120 ENDPROC(cast6_xts_dec_8way)
13121 diff --git a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13122 index dbc4339..de6e120 100644
13123 --- a/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13124 +++ b/arch/x86/crypto/crc32c-pcl-intel-asm_64.S
13125 @@ -45,6 +45,7 @@
13126
13127 #include <asm/inst.h>
13128 #include <linux/linkage.h>
13129 +#include <asm/alternative-asm.h>
13130
13131 ## ISCSI CRC 32 Implementation with crc32 and pclmulqdq Instruction
13132
13133 @@ -312,6 +313,7 @@ do_return:
13134 popq %rsi
13135 popq %rdi
13136 popq %rbx
13137 + pax_force_retaddr
13138 ret
13139
13140 ################################################################
13141 diff --git a/arch/x86/crypto/ghash-clmulni-intel_asm.S b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13142 index 586f41a..d02851e 100644
13143 --- a/arch/x86/crypto/ghash-clmulni-intel_asm.S
13144 +++ b/arch/x86/crypto/ghash-clmulni-intel_asm.S
13145 @@ -18,6 +18,7 @@
13146
13147 #include <linux/linkage.h>
13148 #include <asm/inst.h>
13149 +#include <asm/alternative-asm.h>
13150
13151 .data
13152
13153 @@ -93,6 +94,7 @@ __clmul_gf128mul_ble:
13154 psrlq $1, T2
13155 pxor T2, T1
13156 pxor T1, DATA
13157 + pax_force_retaddr
13158 ret
13159 ENDPROC(__clmul_gf128mul_ble)
13160
13161 @@ -105,6 +107,7 @@ ENTRY(clmul_ghash_mul)
13162 call __clmul_gf128mul_ble
13163 PSHUFB_XMM BSWAP DATA
13164 movups DATA, (%rdi)
13165 + pax_force_retaddr
13166 ret
13167 ENDPROC(clmul_ghash_mul)
13168
13169 @@ -132,6 +135,7 @@ ENTRY(clmul_ghash_update)
13170 PSHUFB_XMM BSWAP DATA
13171 movups DATA, (%rdi)
13172 .Lupdate_just_ret:
13173 + pax_force_retaddr
13174 ret
13175 ENDPROC(clmul_ghash_update)
13176
13177 @@ -157,5 +161,6 @@ ENTRY(clmul_ghash_setkey)
13178 pand .Lpoly, %xmm1
13179 pxor %xmm1, %xmm0
13180 movups %xmm0, (%rdi)
13181 + pax_force_retaddr
13182 ret
13183 ENDPROC(clmul_ghash_setkey)
13184 diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
13185 index 9279e0b..c4b3d2c 100644
13186 --- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
13187 +++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
13188 @@ -1,4 +1,5 @@
13189 #include <linux/linkage.h>
13190 +#include <asm/alternative-asm.h>
13191
13192 # enter salsa20_encrypt_bytes
13193 ENTRY(salsa20_encrypt_bytes)
13194 @@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
13195 add %r11,%rsp
13196 mov %rdi,%rax
13197 mov %rsi,%rdx
13198 + pax_force_retaddr
13199 ret
13200 # bytesatleast65:
13201 ._bytesatleast65:
13202 @@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
13203 add %r11,%rsp
13204 mov %rdi,%rax
13205 mov %rsi,%rdx
13206 + pax_force_retaddr
13207 ret
13208 ENDPROC(salsa20_keysetup)
13209
13210 @@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
13211 add %r11,%rsp
13212 mov %rdi,%rax
13213 mov %rsi,%rdx
13214 + pax_force_retaddr
13215 ret
13216 ENDPROC(salsa20_ivsetup)
13217 diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13218 index 2f202f4..d9164d6 100644
13219 --- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13220 +++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
13221 @@ -24,6 +24,7 @@
13222 */
13223
13224 #include <linux/linkage.h>
13225 +#include <asm/alternative-asm.h>
13226 #include "glue_helper-asm-avx.S"
13227
13228 .file "serpent-avx-x86_64-asm_64.S"
13229 @@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
13230 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13231 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13232
13233 + pax_force_retaddr
13234 ret;
13235 ENDPROC(__serpent_enc_blk8_avx)
13236
13237 @@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
13238 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13239 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13240
13241 + pax_force_retaddr
13242 ret;
13243 ENDPROC(__serpent_dec_blk8_avx)
13244
13245 @@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
13246
13247 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13248
13249 + pax_force_retaddr
13250 ret;
13251 ENDPROC(serpent_ecb_enc_8way_avx)
13252
13253 @@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
13254
13255 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13256
13257 + pax_force_retaddr
13258 ret;
13259 ENDPROC(serpent_ecb_dec_8way_avx)
13260
13261 @@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
13262
13263 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13264
13265 + pax_force_retaddr
13266 ret;
13267 ENDPROC(serpent_cbc_dec_8way_avx)
13268
13269 @@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
13270
13271 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13272
13273 + pax_force_retaddr
13274 ret;
13275 ENDPROC(serpent_ctr_8way_avx)
13276
13277 @@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
13278 /* dst <= regs xor IVs(in dst) */
13279 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13280
13281 + pax_force_retaddr
13282 ret;
13283 ENDPROC(serpent_xts_enc_8way_avx)
13284
13285 @@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
13286 /* dst <= regs xor IVs(in dst) */
13287 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
13288
13289 + pax_force_retaddr
13290 ret;
13291 ENDPROC(serpent_xts_dec_8way_avx)
13292 diff --git a/arch/x86/crypto/serpent-avx2-asm_64.S b/arch/x86/crypto/serpent-avx2-asm_64.S
13293 index b222085..abd483c 100644
13294 --- a/arch/x86/crypto/serpent-avx2-asm_64.S
13295 +++ b/arch/x86/crypto/serpent-avx2-asm_64.S
13296 @@ -15,6 +15,7 @@
13297 */
13298
13299 #include <linux/linkage.h>
13300 +#include <asm/alternative-asm.h>
13301 #include "glue_helper-asm-avx2.S"
13302
13303 .file "serpent-avx2-asm_64.S"
13304 @@ -610,6 +611,7 @@ __serpent_enc_blk16:
13305 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13306 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13307
13308 + pax_force_retaddr
13309 ret;
13310 ENDPROC(__serpent_enc_blk16)
13311
13312 @@ -664,6 +666,7 @@ __serpent_dec_blk16:
13313 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13314 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13315
13316 + pax_force_retaddr
13317 ret;
13318 ENDPROC(__serpent_dec_blk16)
13319
13320 @@ -684,6 +687,7 @@ ENTRY(serpent_ecb_enc_16way)
13321
13322 vzeroupper;
13323
13324 + pax_force_retaddr
13325 ret;
13326 ENDPROC(serpent_ecb_enc_16way)
13327
13328 @@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_16way)
13329
13330 vzeroupper;
13331
13332 + pax_force_retaddr
13333 ret;
13334 ENDPROC(serpent_ecb_dec_16way)
13335
13336 @@ -725,6 +730,7 @@ ENTRY(serpent_cbc_dec_16way)
13337
13338 vzeroupper;
13339
13340 + pax_force_retaddr
13341 ret;
13342 ENDPROC(serpent_cbc_dec_16way)
13343
13344 @@ -748,6 +754,7 @@ ENTRY(serpent_ctr_16way)
13345
13346 vzeroupper;
13347
13348 + pax_force_retaddr
13349 ret;
13350 ENDPROC(serpent_ctr_16way)
13351
13352 @@ -772,6 +779,7 @@ ENTRY(serpent_xts_enc_16way)
13353
13354 vzeroupper;
13355
13356 + pax_force_retaddr
13357 ret;
13358 ENDPROC(serpent_xts_enc_16way)
13359
13360 @@ -796,5 +804,6 @@ ENTRY(serpent_xts_dec_16way)
13361
13362 vzeroupper;
13363
13364 + pax_force_retaddr
13365 ret;
13366 ENDPROC(serpent_xts_dec_16way)
13367 diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13368 index acc066c..1559cc4 100644
13369 --- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13370 +++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
13371 @@ -25,6 +25,7 @@
13372 */
13373
13374 #include <linux/linkage.h>
13375 +#include <asm/alternative-asm.h>
13376
13377 .file "serpent-sse2-x86_64-asm_64.S"
13378 .text
13379 @@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
13380 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13381 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13382
13383 + pax_force_retaddr
13384 ret;
13385
13386 .L__enc_xor8:
13387 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
13388 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
13389
13390 + pax_force_retaddr
13391 ret;
13392 ENDPROC(__serpent_enc_blk_8way)
13393
13394 @@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
13395 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
13396 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
13397
13398 + pax_force_retaddr
13399 ret;
13400 ENDPROC(serpent_dec_blk_8way)
13401 diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
13402 index a410950..9dfe7ad 100644
13403 --- a/arch/x86/crypto/sha1_ssse3_asm.S
13404 +++ b/arch/x86/crypto/sha1_ssse3_asm.S
13405 @@ -29,6 +29,7 @@
13406 */
13407
13408 #include <linux/linkage.h>
13409 +#include <asm/alternative-asm.h>
13410
13411 #define CTX %rdi // arg1
13412 #define BUF %rsi // arg2
13413 @@ -75,9 +76,9 @@
13414
13415 push %rbx
13416 push %rbp
13417 - push %r12
13418 + push %r14
13419
13420 - mov %rsp, %r12
13421 + mov %rsp, %r14
13422 sub $64, %rsp # allocate workspace
13423 and $~15, %rsp # align stack
13424
13425 @@ -99,11 +100,12 @@
13426 xor %rax, %rax
13427 rep stosq
13428
13429 - mov %r12, %rsp # deallocate workspace
13430 + mov %r14, %rsp # deallocate workspace
13431
13432 - pop %r12
13433 + pop %r14
13434 pop %rbp
13435 pop %rbx
13436 + pax_force_retaddr
13437 ret
13438
13439 ENDPROC(\name)
13440 diff --git a/arch/x86/crypto/sha256-avx-asm.S b/arch/x86/crypto/sha256-avx-asm.S
13441 index 642f156..51a513c 100644
13442 --- a/arch/x86/crypto/sha256-avx-asm.S
13443 +++ b/arch/x86/crypto/sha256-avx-asm.S
13444 @@ -49,6 +49,7 @@
13445
13446 #ifdef CONFIG_AS_AVX
13447 #include <linux/linkage.h>
13448 +#include <asm/alternative-asm.h>
13449
13450 ## assume buffers not aligned
13451 #define VMOVDQ vmovdqu
13452 @@ -460,6 +461,7 @@ done_hash:
13453 popq %r13
13454 popq %rbp
13455 popq %rbx
13456 + pax_force_retaddr
13457 ret
13458 ENDPROC(sha256_transform_avx)
13459
13460 diff --git a/arch/x86/crypto/sha256-avx2-asm.S b/arch/x86/crypto/sha256-avx2-asm.S
13461 index 9e86944..3795e6a 100644
13462 --- a/arch/x86/crypto/sha256-avx2-asm.S
13463 +++ b/arch/x86/crypto/sha256-avx2-asm.S
13464 @@ -50,6 +50,7 @@
13465
13466 #ifdef CONFIG_AS_AVX2
13467 #include <linux/linkage.h>
13468 +#include <asm/alternative-asm.h>
13469
13470 ## assume buffers not aligned
13471 #define VMOVDQ vmovdqu
13472 @@ -720,6 +721,7 @@ done_hash:
13473 popq %r12
13474 popq %rbp
13475 popq %rbx
13476 + pax_force_retaddr
13477 ret
13478 ENDPROC(sha256_transform_rorx)
13479
13480 diff --git a/arch/x86/crypto/sha256-ssse3-asm.S b/arch/x86/crypto/sha256-ssse3-asm.S
13481 index f833b74..8c62a9e 100644
13482 --- a/arch/x86/crypto/sha256-ssse3-asm.S
13483 +++ b/arch/x86/crypto/sha256-ssse3-asm.S
13484 @@ -47,6 +47,7 @@
13485 ########################################################################
13486
13487 #include <linux/linkage.h>
13488 +#include <asm/alternative-asm.h>
13489
13490 ## assume buffers not aligned
13491 #define MOVDQ movdqu
13492 @@ -471,6 +472,7 @@ done_hash:
13493 popq %rbp
13494 popq %rbx
13495
13496 + pax_force_retaddr
13497 ret
13498 ENDPROC(sha256_transform_ssse3)
13499
13500 diff --git a/arch/x86/crypto/sha512-avx-asm.S b/arch/x86/crypto/sha512-avx-asm.S
13501 index 974dde9..a823ff9 100644
13502 --- a/arch/x86/crypto/sha512-avx-asm.S
13503 +++ b/arch/x86/crypto/sha512-avx-asm.S
13504 @@ -49,6 +49,7 @@
13505
13506 #ifdef CONFIG_AS_AVX
13507 #include <linux/linkage.h>
13508 +#include <asm/alternative-asm.h>
13509
13510 .text
13511
13512 @@ -364,6 +365,7 @@ updateblock:
13513 mov frame_RSPSAVE(%rsp), %rsp
13514
13515 nowork:
13516 + pax_force_retaddr
13517 ret
13518 ENDPROC(sha512_transform_avx)
13519
13520 diff --git a/arch/x86/crypto/sha512-avx2-asm.S b/arch/x86/crypto/sha512-avx2-asm.S
13521 index 568b961..ed20c37 100644
13522 --- a/arch/x86/crypto/sha512-avx2-asm.S
13523 +++ b/arch/x86/crypto/sha512-avx2-asm.S
13524 @@ -51,6 +51,7 @@
13525
13526 #ifdef CONFIG_AS_AVX2
13527 #include <linux/linkage.h>
13528 +#include <asm/alternative-asm.h>
13529
13530 .text
13531
13532 @@ -678,6 +679,7 @@ done_hash:
13533
13534 # Restore Stack Pointer
13535 mov frame_RSPSAVE(%rsp), %rsp
13536 + pax_force_retaddr
13537 ret
13538 ENDPROC(sha512_transform_rorx)
13539
13540 diff --git a/arch/x86/crypto/sha512-ssse3-asm.S b/arch/x86/crypto/sha512-ssse3-asm.S
13541 index fb56855..6edd768 100644
13542 --- a/arch/x86/crypto/sha512-ssse3-asm.S
13543 +++ b/arch/x86/crypto/sha512-ssse3-asm.S
13544 @@ -48,6 +48,7 @@
13545 ########################################################################
13546
13547 #include <linux/linkage.h>
13548 +#include <asm/alternative-asm.h>
13549
13550 .text
13551
13552 @@ -363,6 +364,7 @@ updateblock:
13553 mov frame_RSPSAVE(%rsp), %rsp
13554
13555 nowork:
13556 + pax_force_retaddr
13557 ret
13558 ENDPROC(sha512_transform_ssse3)
13559
13560 diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
13561 index 0505813..b067311 100644
13562 --- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
13563 +++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
13564 @@ -24,6 +24,7 @@
13565 */
13566
13567 #include <linux/linkage.h>
13568 +#include <asm/alternative-asm.h>
13569 #include "glue_helper-asm-avx.S"
13570
13571 .file "twofish-avx-x86_64-asm_64.S"
13572 @@ -284,6 +285,7 @@ __twofish_enc_blk8:
13573 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
13574 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
13575
13576 + pax_force_retaddr
13577 ret;
13578 ENDPROC(__twofish_enc_blk8)
13579
13580 @@ -324,6 +326,7 @@ __twofish_dec_blk8:
13581 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
13582 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
13583
13584 + pax_force_retaddr
13585 ret;
13586 ENDPROC(__twofish_dec_blk8)
13587
13588 @@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
13589
13590 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13591
13592 + pax_force_retaddr
13593 ret;
13594 ENDPROC(twofish_ecb_enc_8way)
13595
13596 @@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
13597
13598 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13599
13600 + pax_force_retaddr
13601 ret;
13602 ENDPROC(twofish_ecb_dec_8way)
13603
13604 @@ -370,19 +375,20 @@ ENTRY(twofish_cbc_dec_8way)
13605 * %rdx: src
13606 */
13607
13608 - pushq %r12;
13609 + pushq %r14;
13610
13611 movq %rsi, %r11;
13612 - movq %rdx, %r12;
13613 + movq %rdx, %r14;
13614
13615 load_8way(%rdx, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13616
13617 call __twofish_dec_blk8;
13618
13619 - store_cbc_8way(%r12, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13620 + store_cbc_8way(%r14, %r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13621
13622 - popq %r12;
13623 + popq %r14;
13624
13625 + pax_force_retaddr
13626 ret;
13627 ENDPROC(twofish_cbc_dec_8way)
13628
13629 @@ -394,20 +400,21 @@ ENTRY(twofish_ctr_8way)
13630 * %rcx: iv (little endian, 128bit)
13631 */
13632
13633 - pushq %r12;
13634 + pushq %r14;
13635
13636 movq %rsi, %r11;
13637 - movq %rdx, %r12;
13638 + movq %rdx, %r14;
13639
13640 load_ctr_8way(%rcx, .Lbswap128_mask, RA1, RB1, RC1, RD1, RA2, RB2, RC2,
13641 RD2, RX0, RX1, RY0);
13642
13643 call __twofish_enc_blk8;
13644
13645 - store_ctr_8way(%r12, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13646 + store_ctr_8way(%r14, %r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13647
13648 - popq %r12;
13649 + popq %r14;
13650
13651 + pax_force_retaddr
13652 ret;
13653 ENDPROC(twofish_ctr_8way)
13654
13655 @@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
13656 /* dst <= regs xor IVs(in dst) */
13657 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
13658
13659 + pax_force_retaddr
13660 ret;
13661 ENDPROC(twofish_xts_enc_8way)
13662
13663 @@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
13664 /* dst <= regs xor IVs(in dst) */
13665 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
13666
13667 + pax_force_retaddr
13668 ret;
13669 ENDPROC(twofish_xts_dec_8way)
13670 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13671 index 1c3b7ce..02f578d 100644
13672 --- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13673 +++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
13674 @@ -21,6 +21,7 @@
13675 */
13676
13677 #include <linux/linkage.h>
13678 +#include <asm/alternative-asm.h>
13679
13680 .file "twofish-x86_64-asm-3way.S"
13681 .text
13682 @@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
13683 popq %r13;
13684 popq %r14;
13685 popq %r15;
13686 + pax_force_retaddr
13687 ret;
13688
13689 .L__enc_xor3:
13690 @@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
13691 popq %r13;
13692 popq %r14;
13693 popq %r15;
13694 + pax_force_retaddr
13695 ret;
13696 ENDPROC(__twofish_enc_blk_3way)
13697
13698 @@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
13699 popq %r13;
13700 popq %r14;
13701 popq %r15;
13702 + pax_force_retaddr
13703 ret;
13704 ENDPROC(twofish_dec_blk_3way)
13705 diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
13706 index a039d21..524b8b2 100644
13707 --- a/arch/x86/crypto/twofish-x86_64-asm_64.S
13708 +++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
13709 @@ -22,6 +22,7 @@
13710
13711 #include <linux/linkage.h>
13712 #include <asm/asm-offsets.h>
13713 +#include <asm/alternative-asm.h>
13714
13715 #define a_offset 0
13716 #define b_offset 4
13717 @@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
13718
13719 popq R1
13720 movq $1,%rax
13721 + pax_force_retaddr
13722 ret
13723 ENDPROC(twofish_enc_blk)
13724
13725 @@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
13726
13727 popq R1
13728 movq $1,%rax
13729 + pax_force_retaddr
13730 ret
13731 ENDPROC(twofish_dec_blk)
13732 diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
13733 index bae3aba..c1788c1 100644
13734 --- a/arch/x86/ia32/ia32_aout.c
13735 +++ b/arch/x86/ia32/ia32_aout.c
13736 @@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
13737 unsigned long dump_start, dump_size;
13738 struct user32 dump;
13739
13740 + memset(&dump, 0, sizeof(dump));
13741 +
13742 fs = get_fs();
13743 set_fs(KERNEL_DS);
13744 has_dumped = 1;
13745 diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
13746 index 665a730..8e7a67a 100644
13747 --- a/arch/x86/ia32/ia32_signal.c
13748 +++ b/arch/x86/ia32/ia32_signal.c
13749 @@ -338,7 +338,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
13750 sp -= frame_size;
13751 /* Align the stack pointer according to the i386 ABI,
13752 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
13753 - sp = ((sp + 4) & -16ul) - 4;
13754 + sp = ((sp - 12) & -16ul) - 4;
13755 return (void __user *) sp;
13756 }
13757
13758 @@ -396,7 +396,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
13759 * These are actually not used anymore, but left because some
13760 * gdb versions depend on them as a marker.
13761 */
13762 - put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
13763 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
13764 } put_user_catch(err);
13765
13766 if (err)
13767 @@ -438,7 +438,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
13768 0xb8,
13769 __NR_ia32_rt_sigreturn,
13770 0x80cd,
13771 - 0,
13772 + 0
13773 };
13774
13775 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
13776 @@ -461,16 +461,18 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
13777
13778 if (ksig->ka.sa.sa_flags & SA_RESTORER)
13779 restorer = ksig->ka.sa.sa_restorer;
13780 + else if (current->mm->context.vdso)
13781 + /* Return stub is in 32bit vsyscall page */
13782 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
13783 else
13784 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
13785 - rt_sigreturn);
13786 + restorer = &frame->retcode;
13787 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
13788
13789 /*
13790 * Not actually used anymore, but left because some gdb
13791 * versions need it.
13792 */
13793 - put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
13794 + put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
13795 } put_user_catch(err);
13796
13797 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
13798 diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
13799 index 4299eb0..c0687a7 100644
13800 --- a/arch/x86/ia32/ia32entry.S
13801 +++ b/arch/x86/ia32/ia32entry.S
13802 @@ -15,8 +15,10 @@
13803 #include <asm/irqflags.h>
13804 #include <asm/asm.h>
13805 #include <asm/smap.h>
13806 +#include <asm/pgtable.h>
13807 #include <linux/linkage.h>
13808 #include <linux/err.h>
13809 +#include <asm/alternative-asm.h>
13810
13811 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13812 #include <linux/elf-em.h>
13813 @@ -62,12 +64,12 @@
13814 */
13815 .macro LOAD_ARGS32 offset, _r9=0
13816 .if \_r9
13817 - movl \offset+16(%rsp),%r9d
13818 + movl \offset+R9(%rsp),%r9d
13819 .endif
13820 - movl \offset+40(%rsp),%ecx
13821 - movl \offset+48(%rsp),%edx
13822 - movl \offset+56(%rsp),%esi
13823 - movl \offset+64(%rsp),%edi
13824 + movl \offset+RCX(%rsp),%ecx
13825 + movl \offset+RDX(%rsp),%edx
13826 + movl \offset+RSI(%rsp),%esi
13827 + movl \offset+RDI(%rsp),%edi
13828 movl %eax,%eax /* zero extension */
13829 .endm
13830
13831 @@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
13832 ENDPROC(native_irq_enable_sysexit)
13833 #endif
13834
13835 + .macro pax_enter_kernel_user
13836 + pax_set_fptr_mask
13837 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13838 + call pax_enter_kernel_user
13839 +#endif
13840 + .endm
13841 +
13842 + .macro pax_exit_kernel_user
13843 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13844 + call pax_exit_kernel_user
13845 +#endif
13846 +#ifdef CONFIG_PAX_RANDKSTACK
13847 + pushq %rax
13848 + pushq %r11
13849 + call pax_randomize_kstack
13850 + popq %r11
13851 + popq %rax
13852 +#endif
13853 + .endm
13854 +
13855 + .macro pax_erase_kstack
13856 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13857 + call pax_erase_kstack
13858 +#endif
13859 + .endm
13860 +
13861 /*
13862 * 32bit SYSENTER instruction entry.
13863 *
13864 @@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
13865 CFI_REGISTER rsp,rbp
13866 SWAPGS_UNSAFE_STACK
13867 movq PER_CPU_VAR(kernel_stack), %rsp
13868 - addq $(KERNEL_STACK_OFFSET),%rsp
13869 - /*
13870 - * No need to follow this irqs on/off section: the syscall
13871 - * disabled irqs, here we enable it straight after entry:
13872 - */
13873 - ENABLE_INTERRUPTS(CLBR_NONE)
13874 movl %ebp,%ebp /* zero extension */
13875 pushq_cfi $__USER32_DS
13876 /*CFI_REL_OFFSET ss,0*/
13877 @@ -135,24 +157,49 @@ ENTRY(ia32_sysenter_target)
13878 CFI_REL_OFFSET rsp,0
13879 pushfq_cfi
13880 /*CFI_REL_OFFSET rflags,0*/
13881 - movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
13882 - CFI_REGISTER rip,r10
13883 + orl $X86_EFLAGS_IF,(%rsp)
13884 + GET_THREAD_INFO(%r11)
13885 + movl TI_sysenter_return(%r11), %r11d
13886 + CFI_REGISTER rip,r11
13887 pushq_cfi $__USER32_CS
13888 /*CFI_REL_OFFSET cs,0*/
13889 movl %eax, %eax
13890 - pushq_cfi %r10
13891 + pushq_cfi %r11
13892 CFI_REL_OFFSET rip,0
13893 pushq_cfi %rax
13894 cld
13895 SAVE_ARGS 0,1,0
13896 + pax_enter_kernel_user
13897 +
13898 +#ifdef CONFIG_PAX_RANDKSTACK
13899 + pax_erase_kstack
13900 +#endif
13901 +
13902 + /*
13903 + * No need to follow this irqs on/off section: the syscall
13904 + * disabled irqs, here we enable it straight after entry:
13905 + */
13906 + ENABLE_INTERRUPTS(CLBR_NONE)
13907 /* no need to do an access_ok check here because rbp has been
13908 32bit zero extended */
13909 +
13910 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13911 + addq pax_user_shadow_base,%rbp
13912 + ASM_PAX_OPEN_USERLAND
13913 +#endif
13914 +
13915 ASM_STAC
13916 1: movl (%rbp),%ebp
13917 _ASM_EXTABLE(1b,ia32_badarg)
13918 ASM_CLAC
13919 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13920 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13921 +
13922 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13923 + ASM_PAX_CLOSE_USERLAND
13924 +#endif
13925 +
13926 + GET_THREAD_INFO(%r11)
13927 + orl $TS_COMPAT,TI_status(%r11)
13928 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
13929 CFI_REMEMBER_STATE
13930 jnz sysenter_tracesys
13931 cmpq $(IA32_NR_syscalls-1),%rax
13932 @@ -162,15 +209,18 @@ sysenter_do_call:
13933 sysenter_dispatch:
13934 call *ia32_sys_call_table(,%rax,8)
13935 movq %rax,RAX-ARGOFFSET(%rsp)
13936 + GET_THREAD_INFO(%r11)
13937 DISABLE_INTERRUPTS(CLBR_NONE)
13938 TRACE_IRQS_OFF
13939 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13940 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
13941 jnz sysexit_audit
13942 sysexit_from_sys_call:
13943 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13944 + pax_exit_kernel_user
13945 + pax_erase_kstack
13946 + andl $~TS_COMPAT,TI_status(%r11)
13947 /* clear IF, that popfq doesn't enable interrupts early */
13948 - andl $~0x200,EFLAGS-R11(%rsp)
13949 - movl RIP-R11(%rsp),%edx /* User %eip */
13950 + andl $~X86_EFLAGS_IF,EFLAGS(%rsp)
13951 + movl RIP(%rsp),%edx /* User %eip */
13952 CFI_REGISTER rip,rdx
13953 RESTORE_ARGS 0,24,0,0,0,0
13954 xorq %r8,%r8
13955 @@ -193,6 +243,9 @@ sysexit_from_sys_call:
13956 movl %eax,%esi /* 2nd arg: syscall number */
13957 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
13958 call __audit_syscall_entry
13959 +
13960 + pax_erase_kstack
13961 +
13962 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
13963 cmpq $(IA32_NR_syscalls-1),%rax
13964 ja ia32_badsys
13965 @@ -204,7 +257,7 @@ sysexit_from_sys_call:
13966 .endm
13967
13968 .macro auditsys_exit exit
13969 - testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13970 + testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
13971 jnz ia32_ret_from_sys_call
13972 TRACE_IRQS_ON
13973 ENABLE_INTERRUPTS(CLBR_NONE)
13974 @@ -215,11 +268,12 @@ sysexit_from_sys_call:
13975 1: setbe %al /* 1 if error, 0 if not */
13976 movzbl %al,%edi /* zero-extend that into %edi */
13977 call __audit_syscall_exit
13978 + GET_THREAD_INFO(%r11)
13979 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
13980 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
13981 DISABLE_INTERRUPTS(CLBR_NONE)
13982 TRACE_IRQS_OFF
13983 - testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13984 + testl %edi,TI_flags(%r11)
13985 jz \exit
13986 CLEAR_RREGS -ARGOFFSET
13987 jmp int_with_check
13988 @@ -237,7 +291,7 @@ sysexit_audit:
13989
13990 sysenter_tracesys:
13991 #ifdef CONFIG_AUDITSYSCALL
13992 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
13993 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
13994 jz sysenter_auditsys
13995 #endif
13996 SAVE_REST
13997 @@ -249,6 +303,9 @@ sysenter_tracesys:
13998 RESTORE_REST
13999 cmpq $(IA32_NR_syscalls-1),%rax
14000 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
14001 +
14002 + pax_erase_kstack
14003 +
14004 jmp sysenter_do_call
14005 CFI_ENDPROC
14006 ENDPROC(ia32_sysenter_target)
14007 @@ -276,19 +333,25 @@ ENDPROC(ia32_sysenter_target)
14008 ENTRY(ia32_cstar_target)
14009 CFI_STARTPROC32 simple
14010 CFI_SIGNAL_FRAME
14011 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
14012 + CFI_DEF_CFA rsp,0
14013 CFI_REGISTER rip,rcx
14014 /*CFI_REGISTER rflags,r11*/
14015 SWAPGS_UNSAFE_STACK
14016 movl %esp,%r8d
14017 CFI_REGISTER rsp,r8
14018 movq PER_CPU_VAR(kernel_stack),%rsp
14019 + SAVE_ARGS 8*6,0,0
14020 + pax_enter_kernel_user
14021 +
14022 +#ifdef CONFIG_PAX_RANDKSTACK
14023 + pax_erase_kstack
14024 +#endif
14025 +
14026 /*
14027 * No need to follow this irqs on/off section: the syscall
14028 * disabled irqs and here we enable it straight after entry:
14029 */
14030 ENABLE_INTERRUPTS(CLBR_NONE)
14031 - SAVE_ARGS 8,0,0
14032 movl %eax,%eax /* zero extension */
14033 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
14034 movq %rcx,RIP-ARGOFFSET(%rsp)
14035 @@ -304,12 +367,25 @@ ENTRY(ia32_cstar_target)
14036 /* no need to do an access_ok check here because r8 has been
14037 32bit zero extended */
14038 /* hardware stack frame is complete now */
14039 +
14040 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14041 + ASM_PAX_OPEN_USERLAND
14042 + movq pax_user_shadow_base,%r8
14043 + addq RSP-ARGOFFSET(%rsp),%r8
14044 +#endif
14045 +
14046 ASM_STAC
14047 1: movl (%r8),%r9d
14048 _ASM_EXTABLE(1b,ia32_badarg)
14049 ASM_CLAC
14050 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14051 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14052 +
14053 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14054 + ASM_PAX_CLOSE_USERLAND
14055 +#endif
14056 +
14057 + GET_THREAD_INFO(%r11)
14058 + orl $TS_COMPAT,TI_status(%r11)
14059 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14060 CFI_REMEMBER_STATE
14061 jnz cstar_tracesys
14062 cmpq $IA32_NR_syscalls-1,%rax
14063 @@ -319,13 +395,16 @@ cstar_do_call:
14064 cstar_dispatch:
14065 call *ia32_sys_call_table(,%rax,8)
14066 movq %rax,RAX-ARGOFFSET(%rsp)
14067 + GET_THREAD_INFO(%r11)
14068 DISABLE_INTERRUPTS(CLBR_NONE)
14069 TRACE_IRQS_OFF
14070 - testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14071 + testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
14072 jnz sysretl_audit
14073 sysretl_from_sys_call:
14074 - andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14075 - RESTORE_ARGS 0,-ARG_SKIP,0,0,0
14076 + pax_exit_kernel_user
14077 + pax_erase_kstack
14078 + andl $~TS_COMPAT,TI_status(%r11)
14079 + RESTORE_ARGS 0,-ORIG_RAX,0,0,0
14080 movl RIP-ARGOFFSET(%rsp),%ecx
14081 CFI_REGISTER rip,rcx
14082 movl EFLAGS-ARGOFFSET(%rsp),%r11d
14083 @@ -352,7 +431,7 @@ sysretl_audit:
14084
14085 cstar_tracesys:
14086 #ifdef CONFIG_AUDITSYSCALL
14087 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14088 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
14089 jz cstar_auditsys
14090 #endif
14091 xchgl %r9d,%ebp
14092 @@ -366,11 +445,19 @@ cstar_tracesys:
14093 xchgl %ebp,%r9d
14094 cmpq $(IA32_NR_syscalls-1),%rax
14095 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
14096 +
14097 + pax_erase_kstack
14098 +
14099 jmp cstar_do_call
14100 END(ia32_cstar_target)
14101
14102 ia32_badarg:
14103 ASM_CLAC
14104 +
14105 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14106 + ASM_PAX_CLOSE_USERLAND
14107 +#endif
14108 +
14109 movq $-EFAULT,%rax
14110 jmp ia32_sysret
14111 CFI_ENDPROC
14112 @@ -407,19 +494,26 @@ ENTRY(ia32_syscall)
14113 CFI_REL_OFFSET rip,RIP-RIP
14114 PARAVIRT_ADJUST_EXCEPTION_FRAME
14115 SWAPGS
14116 - /*
14117 - * No need to follow this irqs on/off section: the syscall
14118 - * disabled irqs and here we enable it straight after entry:
14119 - */
14120 - ENABLE_INTERRUPTS(CLBR_NONE)
14121 movl %eax,%eax
14122 pushq_cfi %rax
14123 cld
14124 /* note the registers are not zero extended to the sf.
14125 this could be a problem. */
14126 SAVE_ARGS 0,1,0
14127 - orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14128 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
14129 + pax_enter_kernel_user
14130 +
14131 +#ifdef CONFIG_PAX_RANDKSTACK
14132 + pax_erase_kstack
14133 +#endif
14134 +
14135 + /*
14136 + * No need to follow this irqs on/off section: the syscall
14137 + * disabled irqs and here we enable it straight after entry:
14138 + */
14139 + ENABLE_INTERRUPTS(CLBR_NONE)
14140 + GET_THREAD_INFO(%r11)
14141 + orl $TS_COMPAT,TI_status(%r11)
14142 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
14143 jnz ia32_tracesys
14144 cmpq $(IA32_NR_syscalls-1),%rax
14145 ja ia32_badsys
14146 @@ -442,6 +536,9 @@ ia32_tracesys:
14147 RESTORE_REST
14148 cmpq $(IA32_NR_syscalls-1),%rax
14149 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
14150 +
14151 + pax_erase_kstack
14152 +
14153 jmp ia32_do_call
14154 END(ia32_syscall)
14155
14156 diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
14157 index 8e0ceec..af13504 100644
14158 --- a/arch/x86/ia32/sys_ia32.c
14159 +++ b/arch/x86/ia32/sys_ia32.c
14160 @@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
14161 */
14162 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
14163 {
14164 - typeof(ubuf->st_uid) uid = 0;
14165 - typeof(ubuf->st_gid) gid = 0;
14166 + typeof(((struct stat64 *)0)->st_uid) uid = 0;
14167 + typeof(((struct stat64 *)0)->st_gid) gid = 0;
14168 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
14169 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
14170 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
14171 diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
14172 index 372231c..51b537d 100644
14173 --- a/arch/x86/include/asm/alternative-asm.h
14174 +++ b/arch/x86/include/asm/alternative-asm.h
14175 @@ -18,6 +18,45 @@
14176 .endm
14177 #endif
14178
14179 +#ifdef KERNEXEC_PLUGIN
14180 + .macro pax_force_retaddr_bts rip=0
14181 + btsq $63,\rip(%rsp)
14182 + .endm
14183 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
14184 + .macro pax_force_retaddr rip=0, reload=0
14185 + btsq $63,\rip(%rsp)
14186 + .endm
14187 + .macro pax_force_fptr ptr
14188 + btsq $63,\ptr
14189 + .endm
14190 + .macro pax_set_fptr_mask
14191 + .endm
14192 +#endif
14193 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
14194 + .macro pax_force_retaddr rip=0, reload=0
14195 + .if \reload
14196 + pax_set_fptr_mask
14197 + .endif
14198 + orq %r12,\rip(%rsp)
14199 + .endm
14200 + .macro pax_force_fptr ptr
14201 + orq %r12,\ptr
14202 + .endm
14203 + .macro pax_set_fptr_mask
14204 + movabs $0x8000000000000000,%r12
14205 + .endm
14206 +#endif
14207 +#else
14208 + .macro pax_force_retaddr rip=0, reload=0
14209 + .endm
14210 + .macro pax_force_fptr ptr
14211 + .endm
14212 + .macro pax_force_retaddr_bts rip=0
14213 + .endm
14214 + .macro pax_set_fptr_mask
14215 + .endm
14216 +#endif
14217 +
14218 .macro altinstruction_entry orig alt feature orig_len alt_len
14219 .long \orig - .
14220 .long \alt - .
14221 diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
14222 index 0a3f9c9..c9d081d 100644
14223 --- a/arch/x86/include/asm/alternative.h
14224 +++ b/arch/x86/include/asm/alternative.h
14225 @@ -106,7 +106,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
14226 ".pushsection .discard,\"aw\",@progbits\n" \
14227 DISCARD_ENTRY(1) \
14228 ".popsection\n" \
14229 - ".pushsection .altinstr_replacement, \"ax\"\n" \
14230 + ".pushsection .altinstr_replacement, \"a\"\n" \
14231 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
14232 ".popsection"
14233
14234 @@ -120,7 +120,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
14235 DISCARD_ENTRY(1) \
14236 DISCARD_ENTRY(2) \
14237 ".popsection\n" \
14238 - ".pushsection .altinstr_replacement, \"ax\"\n" \
14239 + ".pushsection .altinstr_replacement, \"a\"\n" \
14240 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
14241 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
14242 ".popsection"
14243 diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
14244 index 1d2091a..f5074c1 100644
14245 --- a/arch/x86/include/asm/apic.h
14246 +++ b/arch/x86/include/asm/apic.h
14247 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(void)
14248
14249 #ifdef CONFIG_X86_LOCAL_APIC
14250
14251 -extern unsigned int apic_verbosity;
14252 +extern int apic_verbosity;
14253 extern int local_apic_timer_c2_ok;
14254
14255 extern int disable_apic;
14256 diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
14257 index 20370c6..a2eb9b0 100644
14258 --- a/arch/x86/include/asm/apm.h
14259 +++ b/arch/x86/include/asm/apm.h
14260 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
14261 __asm__ __volatile__(APM_DO_ZERO_SEGS
14262 "pushl %%edi\n\t"
14263 "pushl %%ebp\n\t"
14264 - "lcall *%%cs:apm_bios_entry\n\t"
14265 + "lcall *%%ss:apm_bios_entry\n\t"
14266 "setc %%al\n\t"
14267 "popl %%ebp\n\t"
14268 "popl %%edi\n\t"
14269 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
14270 __asm__ __volatile__(APM_DO_ZERO_SEGS
14271 "pushl %%edi\n\t"
14272 "pushl %%ebp\n\t"
14273 - "lcall *%%cs:apm_bios_entry\n\t"
14274 + "lcall *%%ss:apm_bios_entry\n\t"
14275 "setc %%bl\n\t"
14276 "popl %%ebp\n\t"
14277 "popl %%edi\n\t"
14278 diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
14279 index 722aa3b..c392d85 100644
14280 --- a/arch/x86/include/asm/atomic.h
14281 +++ b/arch/x86/include/asm/atomic.h
14282 @@ -22,7 +22,18 @@
14283 */
14284 static inline int atomic_read(const atomic_t *v)
14285 {
14286 - return (*(volatile int *)&(v)->counter);
14287 + return (*(volatile const int *)&(v)->counter);
14288 +}
14289 +
14290 +/**
14291 + * atomic_read_unchecked - read atomic variable
14292 + * @v: pointer of type atomic_unchecked_t
14293 + *
14294 + * Atomically reads the value of @v.
14295 + */
14296 +static inline int __intentional_overflow(-1) atomic_read_unchecked(const atomic_unchecked_t *v)
14297 +{
14298 + return (*(volatile const int *)&(v)->counter);
14299 }
14300
14301 /**
14302 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
14303 }
14304
14305 /**
14306 + * atomic_set_unchecked - set atomic variable
14307 + * @v: pointer of type atomic_unchecked_t
14308 + * @i: required value
14309 + *
14310 + * Atomically sets the value of @v to @i.
14311 + */
14312 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
14313 +{
14314 + v->counter = i;
14315 +}
14316 +
14317 +/**
14318 * atomic_add - add integer to atomic variable
14319 * @i: integer value to add
14320 * @v: pointer of type atomic_t
14321 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
14322 */
14323 static inline void atomic_add(int i, atomic_t *v)
14324 {
14325 - asm volatile(LOCK_PREFIX "addl %1,%0"
14326 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
14327 +
14328 +#ifdef CONFIG_PAX_REFCOUNT
14329 + "jno 0f\n"
14330 + LOCK_PREFIX "subl %1,%0\n"
14331 + "int $4\n0:\n"
14332 + _ASM_EXTABLE(0b, 0b)
14333 +#endif
14334 +
14335 + : "+m" (v->counter)
14336 + : "ir" (i));
14337 +}
14338 +
14339 +/**
14340 + * atomic_add_unchecked - add integer to atomic variable
14341 + * @i: integer value to add
14342 + * @v: pointer of type atomic_unchecked_t
14343 + *
14344 + * Atomically adds @i to @v.
14345 + */
14346 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
14347 +{
14348 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
14349 : "+m" (v->counter)
14350 : "ir" (i));
14351 }
14352 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
14353 */
14354 static inline void atomic_sub(int i, atomic_t *v)
14355 {
14356 - asm volatile(LOCK_PREFIX "subl %1,%0"
14357 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
14358 +
14359 +#ifdef CONFIG_PAX_REFCOUNT
14360 + "jno 0f\n"
14361 + LOCK_PREFIX "addl %1,%0\n"
14362 + "int $4\n0:\n"
14363 + _ASM_EXTABLE(0b, 0b)
14364 +#endif
14365 +
14366 + : "+m" (v->counter)
14367 + : "ir" (i));
14368 +}
14369 +
14370 +/**
14371 + * atomic_sub_unchecked - subtract integer from atomic variable
14372 + * @i: integer value to subtract
14373 + * @v: pointer of type atomic_unchecked_t
14374 + *
14375 + * Atomically subtracts @i from @v.
14376 + */
14377 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
14378 +{
14379 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
14380 : "+m" (v->counter)
14381 : "ir" (i));
14382 }
14383 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
14384 {
14385 unsigned char c;
14386
14387 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
14388 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
14389 +
14390 +#ifdef CONFIG_PAX_REFCOUNT
14391 + "jno 0f\n"
14392 + LOCK_PREFIX "addl %2,%0\n"
14393 + "int $4\n0:\n"
14394 + _ASM_EXTABLE(0b, 0b)
14395 +#endif
14396 +
14397 + "sete %1\n"
14398 : "+m" (v->counter), "=qm" (c)
14399 : "ir" (i) : "memory");
14400 return c;
14401 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
14402 */
14403 static inline void atomic_inc(atomic_t *v)
14404 {
14405 - asm volatile(LOCK_PREFIX "incl %0"
14406 + asm volatile(LOCK_PREFIX "incl %0\n"
14407 +
14408 +#ifdef CONFIG_PAX_REFCOUNT
14409 + "jno 0f\n"
14410 + LOCK_PREFIX "decl %0\n"
14411 + "int $4\n0:\n"
14412 + _ASM_EXTABLE(0b, 0b)
14413 +#endif
14414 +
14415 + : "+m" (v->counter));
14416 +}
14417 +
14418 +/**
14419 + * atomic_inc_unchecked - increment atomic variable
14420 + * @v: pointer of type atomic_unchecked_t
14421 + *
14422 + * Atomically increments @v by 1.
14423 + */
14424 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
14425 +{
14426 + asm volatile(LOCK_PREFIX "incl %0\n"
14427 : "+m" (v->counter));
14428 }
14429
14430 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
14431 */
14432 static inline void atomic_dec(atomic_t *v)
14433 {
14434 - asm volatile(LOCK_PREFIX "decl %0"
14435 + asm volatile(LOCK_PREFIX "decl %0\n"
14436 +
14437 +#ifdef CONFIG_PAX_REFCOUNT
14438 + "jno 0f\n"
14439 + LOCK_PREFIX "incl %0\n"
14440 + "int $4\n0:\n"
14441 + _ASM_EXTABLE(0b, 0b)
14442 +#endif
14443 +
14444 + : "+m" (v->counter));
14445 +}
14446 +
14447 +/**
14448 + * atomic_dec_unchecked - decrement atomic variable
14449 + * @v: pointer of type atomic_unchecked_t
14450 + *
14451 + * Atomically decrements @v by 1.
14452 + */
14453 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
14454 +{
14455 + asm volatile(LOCK_PREFIX "decl %0\n"
14456 : "+m" (v->counter));
14457 }
14458
14459 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
14460 {
14461 unsigned char c;
14462
14463 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
14464 + asm volatile(LOCK_PREFIX "decl %0\n"
14465 +
14466 +#ifdef CONFIG_PAX_REFCOUNT
14467 + "jno 0f\n"
14468 + LOCK_PREFIX "incl %0\n"
14469 + "int $4\n0:\n"
14470 + _ASM_EXTABLE(0b, 0b)
14471 +#endif
14472 +
14473 + "sete %1\n"
14474 : "+m" (v->counter), "=qm" (c)
14475 : : "memory");
14476 return c != 0;
14477 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
14478 {
14479 unsigned char c;
14480
14481 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
14482 + asm volatile(LOCK_PREFIX "incl %0\n"
14483 +
14484 +#ifdef CONFIG_PAX_REFCOUNT
14485 + "jno 0f\n"
14486 + LOCK_PREFIX "decl %0\n"
14487 + "int $4\n0:\n"
14488 + _ASM_EXTABLE(0b, 0b)
14489 +#endif
14490 +
14491 + "sete %1\n"
14492 + : "+m" (v->counter), "=qm" (c)
14493 + : : "memory");
14494 + return c != 0;
14495 +}
14496 +
14497 +/**
14498 + * atomic_inc_and_test_unchecked - increment and test
14499 + * @v: pointer of type atomic_unchecked_t
14500 + *
14501 + * Atomically increments @v by 1
14502 + * and returns true if the result is zero, or false for all
14503 + * other cases.
14504 + */
14505 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
14506 +{
14507 + unsigned char c;
14508 +
14509 + asm volatile(LOCK_PREFIX "incl %0\n"
14510 + "sete %1\n"
14511 : "+m" (v->counter), "=qm" (c)
14512 : : "memory");
14513 return c != 0;
14514 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
14515 {
14516 unsigned char c;
14517
14518 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
14519 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
14520 +
14521 +#ifdef CONFIG_PAX_REFCOUNT
14522 + "jno 0f\n"
14523 + LOCK_PREFIX "subl %2,%0\n"
14524 + "int $4\n0:\n"
14525 + _ASM_EXTABLE(0b, 0b)
14526 +#endif
14527 +
14528 + "sets %1\n"
14529 : "+m" (v->counter), "=qm" (c)
14530 : "ir" (i) : "memory");
14531 return c;
14532 @@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
14533 */
14534 static inline int atomic_add_return(int i, atomic_t *v)
14535 {
14536 + return i + xadd_check_overflow(&v->counter, i);
14537 +}
14538 +
14539 +/**
14540 + * atomic_add_return_unchecked - add integer and return
14541 + * @i: integer value to add
14542 + * @v: pointer of type atomic_unchecked_t
14543 + *
14544 + * Atomically adds @i to @v and returns @i + @v
14545 + */
14546 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
14547 +{
14548 return i + xadd(&v->counter, i);
14549 }
14550
14551 @@ -188,9 +362,18 @@ static inline int atomic_sub_return(int i, atomic_t *v)
14552 }
14553
14554 #define atomic_inc_return(v) (atomic_add_return(1, v))
14555 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
14556 +{
14557 + return atomic_add_return_unchecked(1, v);
14558 +}
14559 #define atomic_dec_return(v) (atomic_sub_return(1, v))
14560
14561 -static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
14562 +static inline int __intentional_overflow(-1) atomic_cmpxchg(atomic_t *v, int old, int new)
14563 +{
14564 + return cmpxchg(&v->counter, old, new);
14565 +}
14566 +
14567 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
14568 {
14569 return cmpxchg(&v->counter, old, new);
14570 }
14571 @@ -200,6 +383,11 @@ static inline int atomic_xchg(atomic_t *v, int new)
14572 return xchg(&v->counter, new);
14573 }
14574
14575 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
14576 +{
14577 + return xchg(&v->counter, new);
14578 +}
14579 +
14580 /**
14581 * __atomic_add_unless - add unless the number is already a given value
14582 * @v: pointer of type atomic_t
14583 @@ -209,14 +397,27 @@ static inline int atomic_xchg(atomic_t *v, int new)
14584 * Atomically adds @a to @v, so long as @v was not already @u.
14585 * Returns the old value of @v.
14586 */
14587 -static inline int __atomic_add_unless(atomic_t *v, int a, int u)
14588 +static inline int __intentional_overflow(-1) __atomic_add_unless(atomic_t *v, int a, int u)
14589 {
14590 - int c, old;
14591 + int c, old, new;
14592 c = atomic_read(v);
14593 for (;;) {
14594 - if (unlikely(c == (u)))
14595 + if (unlikely(c == u))
14596 break;
14597 - old = atomic_cmpxchg((v), c, c + (a));
14598 +
14599 + asm volatile("addl %2,%0\n"
14600 +
14601 +#ifdef CONFIG_PAX_REFCOUNT
14602 + "jno 0f\n"
14603 + "subl %2,%0\n"
14604 + "int $4\n0:\n"
14605 + _ASM_EXTABLE(0b, 0b)
14606 +#endif
14607 +
14608 + : "=r" (new)
14609 + : "0" (c), "ir" (a));
14610 +
14611 + old = atomic_cmpxchg(v, c, new);
14612 if (likely(old == c))
14613 break;
14614 c = old;
14615 @@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
14616 }
14617
14618 /**
14619 + * atomic_inc_not_zero_hint - increment if not null
14620 + * @v: pointer of type atomic_t
14621 + * @hint: probable value of the atomic before the increment
14622 + *
14623 + * This version of atomic_inc_not_zero() gives a hint of probable
14624 + * value of the atomic. This helps processor to not read the memory
14625 + * before doing the atomic read/modify/write cycle, lowering
14626 + * number of bus transactions on some arches.
14627 + *
14628 + * Returns: 0 if increment was not done, 1 otherwise.
14629 + */
14630 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
14631 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
14632 +{
14633 + int val, c = hint, new;
14634 +
14635 + /* sanity test, should be removed by compiler if hint is a constant */
14636 + if (!hint)
14637 + return __atomic_add_unless(v, 1, 0);
14638 +
14639 + do {
14640 + asm volatile("incl %0\n"
14641 +
14642 +#ifdef CONFIG_PAX_REFCOUNT
14643 + "jno 0f\n"
14644 + "decl %0\n"
14645 + "int $4\n0:\n"
14646 + _ASM_EXTABLE(0b, 0b)
14647 +#endif
14648 +
14649 + : "=r" (new)
14650 + : "0" (c));
14651 +
14652 + val = atomic_cmpxchg(v, c, new);
14653 + if (val == c)
14654 + return 1;
14655 + c = val;
14656 + } while (c);
14657 +
14658 + return 0;
14659 +}
14660 +
14661 +/**
14662 * atomic_inc_short - increment of a short integer
14663 * @v: pointer to type int
14664 *
14665 @@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
14666 #endif
14667
14668 /* These are x86-specific, used by some header files */
14669 -#define atomic_clear_mask(mask, addr) \
14670 - asm volatile(LOCK_PREFIX "andl %0,%1" \
14671 - : : "r" (~(mask)), "m" (*(addr)) : "memory")
14672 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
14673 +{
14674 + asm volatile(LOCK_PREFIX "andl %1,%0"
14675 + : "+m" (v->counter)
14676 + : "r" (~(mask))
14677 + : "memory");
14678 +}
14679
14680 -#define atomic_set_mask(mask, addr) \
14681 - asm volatile(LOCK_PREFIX "orl %0,%1" \
14682 - : : "r" ((unsigned)(mask)), "m" (*(addr)) \
14683 - : "memory")
14684 +static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
14685 +{
14686 + asm volatile(LOCK_PREFIX "andl %1,%0"
14687 + : "+m" (v->counter)
14688 + : "r" (~(mask))
14689 + : "memory");
14690 +}
14691 +
14692 +static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
14693 +{
14694 + asm volatile(LOCK_PREFIX "orl %1,%0"
14695 + : "+m" (v->counter)
14696 + : "r" (mask)
14697 + : "memory");
14698 +}
14699 +
14700 +static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
14701 +{
14702 + asm volatile(LOCK_PREFIX "orl %1,%0"
14703 + : "+m" (v->counter)
14704 + : "r" (mask)
14705 + : "memory");
14706 +}
14707
14708 /* Atomic operations are already serializing on x86 */
14709 #define smp_mb__before_atomic_dec() barrier()
14710 diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
14711 index b154de7..bf18a5a 100644
14712 --- a/arch/x86/include/asm/atomic64_32.h
14713 +++ b/arch/x86/include/asm/atomic64_32.h
14714 @@ -12,6 +12,14 @@ typedef struct {
14715 u64 __aligned(8) counter;
14716 } atomic64_t;
14717
14718 +#ifdef CONFIG_PAX_REFCOUNT
14719 +typedef struct {
14720 + u64 __aligned(8) counter;
14721 +} atomic64_unchecked_t;
14722 +#else
14723 +typedef atomic64_t atomic64_unchecked_t;
14724 +#endif
14725 +
14726 #define ATOMIC64_INIT(val) { (val) }
14727
14728 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
14729 @@ -37,21 +45,31 @@ typedef struct {
14730 ATOMIC64_DECL_ONE(sym##_386)
14731
14732 ATOMIC64_DECL_ONE(add_386);
14733 +ATOMIC64_DECL_ONE(add_unchecked_386);
14734 ATOMIC64_DECL_ONE(sub_386);
14735 +ATOMIC64_DECL_ONE(sub_unchecked_386);
14736 ATOMIC64_DECL_ONE(inc_386);
14737 +ATOMIC64_DECL_ONE(inc_unchecked_386);
14738 ATOMIC64_DECL_ONE(dec_386);
14739 +ATOMIC64_DECL_ONE(dec_unchecked_386);
14740 #endif
14741
14742 #define alternative_atomic64(f, out, in...) \
14743 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
14744
14745 ATOMIC64_DECL(read);
14746 +ATOMIC64_DECL(read_unchecked);
14747 ATOMIC64_DECL(set);
14748 +ATOMIC64_DECL(set_unchecked);
14749 ATOMIC64_DECL(xchg);
14750 ATOMIC64_DECL(add_return);
14751 +ATOMIC64_DECL(add_return_unchecked);
14752 ATOMIC64_DECL(sub_return);
14753 +ATOMIC64_DECL(sub_return_unchecked);
14754 ATOMIC64_DECL(inc_return);
14755 +ATOMIC64_DECL(inc_return_unchecked);
14756 ATOMIC64_DECL(dec_return);
14757 +ATOMIC64_DECL(dec_return_unchecked);
14758 ATOMIC64_DECL(dec_if_positive);
14759 ATOMIC64_DECL(inc_not_zero);
14760 ATOMIC64_DECL(add_unless);
14761 @@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
14762 }
14763
14764 /**
14765 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
14766 + * @p: pointer to type atomic64_unchecked_t
14767 + * @o: expected value
14768 + * @n: new value
14769 + *
14770 + * Atomically sets @v to @n if it was equal to @o and returns
14771 + * the old value.
14772 + */
14773 +
14774 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
14775 +{
14776 + return cmpxchg64(&v->counter, o, n);
14777 +}
14778 +
14779 +/**
14780 * atomic64_xchg - xchg atomic64 variable
14781 * @v: pointer to type atomic64_t
14782 * @n: value to assign
14783 @@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
14784 }
14785
14786 /**
14787 + * atomic64_set_unchecked - set atomic64 variable
14788 + * @v: pointer to type atomic64_unchecked_t
14789 + * @n: value to assign
14790 + *
14791 + * Atomically sets the value of @v to @n.
14792 + */
14793 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
14794 +{
14795 + unsigned high = (unsigned)(i >> 32);
14796 + unsigned low = (unsigned)i;
14797 + alternative_atomic64(set, /* no output */,
14798 + "S" (v), "b" (low), "c" (high)
14799 + : "eax", "edx", "memory");
14800 +}
14801 +
14802 +/**
14803 * atomic64_read - read atomic64 variable
14804 * @v: pointer to type atomic64_t
14805 *
14806 @@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
14807 }
14808
14809 /**
14810 + * atomic64_read_unchecked - read atomic64 variable
14811 + * @v: pointer to type atomic64_unchecked_t
14812 + *
14813 + * Atomically reads the value of @v and returns it.
14814 + */
14815 +static inline long long __intentional_overflow(-1) atomic64_read_unchecked(atomic64_unchecked_t *v)
14816 +{
14817 + long long r;
14818 + alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
14819 + return r;
14820 + }
14821 +
14822 +/**
14823 * atomic64_add_return - add and return
14824 * @i: integer value to add
14825 * @v: pointer to type atomic64_t
14826 @@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
14827 return i;
14828 }
14829
14830 +/**
14831 + * atomic64_add_return_unchecked - add and return
14832 + * @i: integer value to add
14833 + * @v: pointer to type atomic64_unchecked_t
14834 + *
14835 + * Atomically adds @i to @v and returns @i + *@v
14836 + */
14837 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
14838 +{
14839 + alternative_atomic64(add_return_unchecked,
14840 + ASM_OUTPUT2("+A" (i), "+c" (v)),
14841 + ASM_NO_INPUT_CLOBBER("memory"));
14842 + return i;
14843 +}
14844 +
14845 /*
14846 * Other variants with different arithmetic operators:
14847 */
14848 @@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
14849 return a;
14850 }
14851
14852 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
14853 +{
14854 + long long a;
14855 + alternative_atomic64(inc_return_unchecked, "=&A" (a),
14856 + "S" (v) : "memory", "ecx");
14857 + return a;
14858 +}
14859 +
14860 static inline long long atomic64_dec_return(atomic64_t *v)
14861 {
14862 long long a;
14863 @@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
14864 }
14865
14866 /**
14867 + * atomic64_add_unchecked - add integer to atomic64 variable
14868 + * @i: integer value to add
14869 + * @v: pointer to type atomic64_unchecked_t
14870 + *
14871 + * Atomically adds @i to @v.
14872 + */
14873 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
14874 +{
14875 + __alternative_atomic64(add_unchecked, add_return_unchecked,
14876 + ASM_OUTPUT2("+A" (i), "+c" (v)),
14877 + ASM_NO_INPUT_CLOBBER("memory"));
14878 + return i;
14879 +}
14880 +
14881 +/**
14882 * atomic64_sub - subtract the atomic64 variable
14883 * @i: integer value to subtract
14884 * @v: pointer to type atomic64_t
14885 diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
14886 index 0e1cbfc..a891fc7 100644
14887 --- a/arch/x86/include/asm/atomic64_64.h
14888 +++ b/arch/x86/include/asm/atomic64_64.h
14889 @@ -18,7 +18,19 @@
14890 */
14891 static inline long atomic64_read(const atomic64_t *v)
14892 {
14893 - return (*(volatile long *)&(v)->counter);
14894 + return (*(volatile const long *)&(v)->counter);
14895 +}
14896 +
14897 +/**
14898 + * atomic64_read_unchecked - read atomic64 variable
14899 + * @v: pointer of type atomic64_unchecked_t
14900 + *
14901 + * Atomically reads the value of @v.
14902 + * Doesn't imply a read memory barrier.
14903 + */
14904 +static inline long __intentional_overflow(-1) atomic64_read_unchecked(const atomic64_unchecked_t *v)
14905 +{
14906 + return (*(volatile const long *)&(v)->counter);
14907 }
14908
14909 /**
14910 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
14911 }
14912
14913 /**
14914 + * atomic64_set_unchecked - set atomic64 variable
14915 + * @v: pointer to type atomic64_unchecked_t
14916 + * @i: required value
14917 + *
14918 + * Atomically sets the value of @v to @i.
14919 + */
14920 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
14921 +{
14922 + v->counter = i;
14923 +}
14924 +
14925 +/**
14926 * atomic64_add - add integer to atomic64 variable
14927 * @i: integer value to add
14928 * @v: pointer to type atomic64_t
14929 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
14930 */
14931 static inline void atomic64_add(long i, atomic64_t *v)
14932 {
14933 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
14934 +
14935 +#ifdef CONFIG_PAX_REFCOUNT
14936 + "jno 0f\n"
14937 + LOCK_PREFIX "subq %1,%0\n"
14938 + "int $4\n0:\n"
14939 + _ASM_EXTABLE(0b, 0b)
14940 +#endif
14941 +
14942 + : "=m" (v->counter)
14943 + : "er" (i), "m" (v->counter));
14944 +}
14945 +
14946 +/**
14947 + * atomic64_add_unchecked - add integer to atomic64 variable
14948 + * @i: integer value to add
14949 + * @v: pointer to type atomic64_unchecked_t
14950 + *
14951 + * Atomically adds @i to @v.
14952 + */
14953 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
14954 +{
14955 asm volatile(LOCK_PREFIX "addq %1,%0"
14956 : "=m" (v->counter)
14957 : "er" (i), "m" (v->counter));
14958 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
14959 */
14960 static inline void atomic64_sub(long i, atomic64_t *v)
14961 {
14962 - asm volatile(LOCK_PREFIX "subq %1,%0"
14963 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
14964 +
14965 +#ifdef CONFIG_PAX_REFCOUNT
14966 + "jno 0f\n"
14967 + LOCK_PREFIX "addq %1,%0\n"
14968 + "int $4\n0:\n"
14969 + _ASM_EXTABLE(0b, 0b)
14970 +#endif
14971 +
14972 + : "=m" (v->counter)
14973 + : "er" (i), "m" (v->counter));
14974 +}
14975 +
14976 +/**
14977 + * atomic64_sub_unchecked - subtract the atomic64 variable
14978 + * @i: integer value to subtract
14979 + * @v: pointer to type atomic64_unchecked_t
14980 + *
14981 + * Atomically subtracts @i from @v.
14982 + */
14983 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
14984 +{
14985 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
14986 : "=m" (v->counter)
14987 : "er" (i), "m" (v->counter));
14988 }
14989 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
14990 {
14991 unsigned char c;
14992
14993 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
14994 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
14995 +
14996 +#ifdef CONFIG_PAX_REFCOUNT
14997 + "jno 0f\n"
14998 + LOCK_PREFIX "addq %2,%0\n"
14999 + "int $4\n0:\n"
15000 + _ASM_EXTABLE(0b, 0b)
15001 +#endif
15002 +
15003 + "sete %1\n"
15004 : "=m" (v->counter), "=qm" (c)
15005 : "er" (i), "m" (v->counter) : "memory");
15006 return c;
15007 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
15008 */
15009 static inline void atomic64_inc(atomic64_t *v)
15010 {
15011 + asm volatile(LOCK_PREFIX "incq %0\n"
15012 +
15013 +#ifdef CONFIG_PAX_REFCOUNT
15014 + "jno 0f\n"
15015 + LOCK_PREFIX "decq %0\n"
15016 + "int $4\n0:\n"
15017 + _ASM_EXTABLE(0b, 0b)
15018 +#endif
15019 +
15020 + : "=m" (v->counter)
15021 + : "m" (v->counter));
15022 +}
15023 +
15024 +/**
15025 + * atomic64_inc_unchecked - increment atomic64 variable
15026 + * @v: pointer to type atomic64_unchecked_t
15027 + *
15028 + * Atomically increments @v by 1.
15029 + */
15030 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
15031 +{
15032 asm volatile(LOCK_PREFIX "incq %0"
15033 : "=m" (v->counter)
15034 : "m" (v->counter));
15035 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
15036 */
15037 static inline void atomic64_dec(atomic64_t *v)
15038 {
15039 - asm volatile(LOCK_PREFIX "decq %0"
15040 + asm volatile(LOCK_PREFIX "decq %0\n"
15041 +
15042 +#ifdef CONFIG_PAX_REFCOUNT
15043 + "jno 0f\n"
15044 + LOCK_PREFIX "incq %0\n"
15045 + "int $4\n0:\n"
15046 + _ASM_EXTABLE(0b, 0b)
15047 +#endif
15048 +
15049 + : "=m" (v->counter)
15050 + : "m" (v->counter));
15051 +}
15052 +
15053 +/**
15054 + * atomic64_dec_unchecked - decrement atomic64 variable
15055 + * @v: pointer to type atomic64_t
15056 + *
15057 + * Atomically decrements @v by 1.
15058 + */
15059 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
15060 +{
15061 + asm volatile(LOCK_PREFIX "decq %0\n"
15062 : "=m" (v->counter)
15063 : "m" (v->counter));
15064 }
15065 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
15066 {
15067 unsigned char c;
15068
15069 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
15070 + asm volatile(LOCK_PREFIX "decq %0\n"
15071 +
15072 +#ifdef CONFIG_PAX_REFCOUNT
15073 + "jno 0f\n"
15074 + LOCK_PREFIX "incq %0\n"
15075 + "int $4\n0:\n"
15076 + _ASM_EXTABLE(0b, 0b)
15077 +#endif
15078 +
15079 + "sete %1\n"
15080 : "=m" (v->counter), "=qm" (c)
15081 : "m" (v->counter) : "memory");
15082 return c != 0;
15083 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
15084 {
15085 unsigned char c;
15086
15087 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
15088 + asm volatile(LOCK_PREFIX "incq %0\n"
15089 +
15090 +#ifdef CONFIG_PAX_REFCOUNT
15091 + "jno 0f\n"
15092 + LOCK_PREFIX "decq %0\n"
15093 + "int $4\n0:\n"
15094 + _ASM_EXTABLE(0b, 0b)
15095 +#endif
15096 +
15097 + "sete %1\n"
15098 : "=m" (v->counter), "=qm" (c)
15099 : "m" (v->counter) : "memory");
15100 return c != 0;
15101 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15102 {
15103 unsigned char c;
15104
15105 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
15106 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
15107 +
15108 +#ifdef CONFIG_PAX_REFCOUNT
15109 + "jno 0f\n"
15110 + LOCK_PREFIX "subq %2,%0\n"
15111 + "int $4\n0:\n"
15112 + _ASM_EXTABLE(0b, 0b)
15113 +#endif
15114 +
15115 + "sets %1\n"
15116 : "=m" (v->counter), "=qm" (c)
15117 : "er" (i), "m" (v->counter) : "memory");
15118 return c;
15119 @@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
15120 */
15121 static inline long atomic64_add_return(long i, atomic64_t *v)
15122 {
15123 + return i + xadd_check_overflow(&v->counter, i);
15124 +}
15125 +
15126 +/**
15127 + * atomic64_add_return_unchecked - add and return
15128 + * @i: integer value to add
15129 + * @v: pointer to type atomic64_unchecked_t
15130 + *
15131 + * Atomically adds @i to @v and returns @i + @v
15132 + */
15133 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
15134 +{
15135 return i + xadd(&v->counter, i);
15136 }
15137
15138 @@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
15139 }
15140
15141 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
15142 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
15143 +{
15144 + return atomic64_add_return_unchecked(1, v);
15145 +}
15146 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
15147
15148 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15149 @@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
15150 return cmpxchg(&v->counter, old, new);
15151 }
15152
15153 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
15154 +{
15155 + return cmpxchg(&v->counter, old, new);
15156 +}
15157 +
15158 static inline long atomic64_xchg(atomic64_t *v, long new)
15159 {
15160 return xchg(&v->counter, new);
15161 @@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
15162 */
15163 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
15164 {
15165 - long c, old;
15166 + long c, old, new;
15167 c = atomic64_read(v);
15168 for (;;) {
15169 - if (unlikely(c == (u)))
15170 + if (unlikely(c == u))
15171 break;
15172 - old = atomic64_cmpxchg((v), c, c + (a));
15173 +
15174 + asm volatile("add %2,%0\n"
15175 +
15176 +#ifdef CONFIG_PAX_REFCOUNT
15177 + "jno 0f\n"
15178 + "sub %2,%0\n"
15179 + "int $4\n0:\n"
15180 + _ASM_EXTABLE(0b, 0b)
15181 +#endif
15182 +
15183 + : "=r" (new)
15184 + : "0" (c), "ir" (a));
15185 +
15186 + old = atomic64_cmpxchg(v, c, new);
15187 if (likely(old == c))
15188 break;
15189 c = old;
15190 }
15191 - return c != (u);
15192 + return c != u;
15193 }
15194
15195 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
15196 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
15197 index 41639ce..ebce552 100644
15198 --- a/arch/x86/include/asm/bitops.h
15199 +++ b/arch/x86/include/asm/bitops.h
15200 @@ -48,7 +48,7 @@
15201 * a mask operation on a byte.
15202 */
15203 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
15204 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
15205 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
15206 #define CONST_MASK(nr) (1 << ((nr) & 7))
15207
15208 /**
15209 @@ -361,7 +361,7 @@ static int test_bit(int nr, const volatile unsigned long *addr);
15210 *
15211 * Undefined if no bit exists, so code should check against 0 first.
15212 */
15213 -static inline unsigned long __ffs(unsigned long word)
15214 +static inline unsigned long __intentional_overflow(-1) __ffs(unsigned long word)
15215 {
15216 asm("rep; bsf %1,%0"
15217 : "=r" (word)
15218 @@ -375,7 +375,7 @@ static inline unsigned long __ffs(unsigned long word)
15219 *
15220 * Undefined if no zero exists, so code should check against ~0UL first.
15221 */
15222 -static inline unsigned long ffz(unsigned long word)
15223 +static inline unsigned long __intentional_overflow(-1) ffz(unsigned long word)
15224 {
15225 asm("rep; bsf %1,%0"
15226 : "=r" (word)
15227 @@ -389,7 +389,7 @@ static inline unsigned long ffz(unsigned long word)
15228 *
15229 * Undefined if no set bit exists, so code should check against 0 first.
15230 */
15231 -static inline unsigned long __fls(unsigned long word)
15232 +static inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
15233 {
15234 asm("bsr %1,%0"
15235 : "=r" (word)
15236 @@ -452,7 +452,7 @@ static inline int ffs(int x)
15237 * set bit if value is nonzero. The last (most significant) bit is
15238 * at position 32.
15239 */
15240 -static inline int fls(int x)
15241 +static inline int __intentional_overflow(-1) fls(int x)
15242 {
15243 int r;
15244
15245 @@ -494,7 +494,7 @@ static inline int fls(int x)
15246 * at position 64.
15247 */
15248 #ifdef CONFIG_X86_64
15249 -static __always_inline int fls64(__u64 x)
15250 +static __always_inline long fls64(__u64 x)
15251 {
15252 int bitpos = -1;
15253 /*
15254 diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
15255 index 4fa687a..60f2d39 100644
15256 --- a/arch/x86/include/asm/boot.h
15257 +++ b/arch/x86/include/asm/boot.h
15258 @@ -6,10 +6,15 @@
15259 #include <uapi/asm/boot.h>
15260
15261 /* Physical address where kernel should be loaded. */
15262 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
15263 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
15264 + (CONFIG_PHYSICAL_ALIGN - 1)) \
15265 & ~(CONFIG_PHYSICAL_ALIGN - 1))
15266
15267 +#ifndef __ASSEMBLY__
15268 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
15269 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
15270 +#endif
15271 +
15272 /* Minimum kernel alignment, as a power of two */
15273 #ifdef CONFIG_X86_64
15274 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
15275 diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
15276 index 48f99f1..d78ebf9 100644
15277 --- a/arch/x86/include/asm/cache.h
15278 +++ b/arch/x86/include/asm/cache.h
15279 @@ -5,12 +5,13 @@
15280
15281 /* L1 cache line size */
15282 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
15283 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
15284 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
15285
15286 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
15287 +#define __read_only __attribute__((__section__(".data..read_only")))
15288
15289 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
15290 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
15291 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
15292
15293 #ifdef CONFIG_X86_VSMP
15294 #ifdef CONFIG_SMP
15295 diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
15296 index 9863ee3..4a1f8e1 100644
15297 --- a/arch/x86/include/asm/cacheflush.h
15298 +++ b/arch/x86/include/asm/cacheflush.h
15299 @@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
15300 unsigned long pg_flags = pg->flags & _PGMT_MASK;
15301
15302 if (pg_flags == _PGMT_DEFAULT)
15303 - return -1;
15304 + return ~0UL;
15305 else if (pg_flags == _PGMT_WC)
15306 return _PAGE_CACHE_WC;
15307 else if (pg_flags == _PGMT_UC_MINUS)
15308 diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
15309 index 0fa6750..cb7b2c3 100644
15310 --- a/arch/x86/include/asm/calling.h
15311 +++ b/arch/x86/include/asm/calling.h
15312 @@ -80,103 +80,113 @@ For 32-bit we have the following conventions - kernel is built with
15313 #define RSP 152
15314 #define SS 160
15315
15316 -#define ARGOFFSET R11
15317 -#define SWFRAME ORIG_RAX
15318 +#define ARGOFFSET R15
15319
15320 .macro SAVE_ARGS addskip=0, save_rcx=1, save_r891011=1
15321 - subq $9*8+\addskip, %rsp
15322 - CFI_ADJUST_CFA_OFFSET 9*8+\addskip
15323 - movq_cfi rdi, 8*8
15324 - movq_cfi rsi, 7*8
15325 - movq_cfi rdx, 6*8
15326 + subq $ORIG_RAX-ARGOFFSET+\addskip, %rsp
15327 + CFI_ADJUST_CFA_OFFSET ORIG_RAX-ARGOFFSET+\addskip
15328 + movq_cfi rdi, RDI
15329 + movq_cfi rsi, RSI
15330 + movq_cfi rdx, RDX
15331
15332 .if \save_rcx
15333 - movq_cfi rcx, 5*8
15334 + movq_cfi rcx, RCX
15335 .endif
15336
15337 - movq_cfi rax, 4*8
15338 + movq_cfi rax, RAX
15339
15340 .if \save_r891011
15341 - movq_cfi r8, 3*8
15342 - movq_cfi r9, 2*8
15343 - movq_cfi r10, 1*8
15344 - movq_cfi r11, 0*8
15345 + movq_cfi r8, R8
15346 + movq_cfi r9, R9
15347 + movq_cfi r10, R10
15348 + movq_cfi r11, R11
15349 .endif
15350
15351 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15352 + movq_cfi r12, R12
15353 +#endif
15354 +
15355 .endm
15356
15357 -#define ARG_SKIP (9*8)
15358 +#define ARG_SKIP ORIG_RAX
15359
15360 .macro RESTORE_ARGS rstor_rax=1, addskip=0, rstor_rcx=1, rstor_r11=1, \
15361 rstor_r8910=1, rstor_rdx=1
15362 +
15363 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15364 + movq_cfi_restore R12, r12
15365 +#endif
15366 +
15367 .if \rstor_r11
15368 - movq_cfi_restore 0*8, r11
15369 + movq_cfi_restore R11, r11
15370 .endif
15371
15372 .if \rstor_r8910
15373 - movq_cfi_restore 1*8, r10
15374 - movq_cfi_restore 2*8, r9
15375 - movq_cfi_restore 3*8, r8
15376 + movq_cfi_restore R10, r10
15377 + movq_cfi_restore R9, r9
15378 + movq_cfi_restore R8, r8
15379 .endif
15380
15381 .if \rstor_rax
15382 - movq_cfi_restore 4*8, rax
15383 + movq_cfi_restore RAX, rax
15384 .endif
15385
15386 .if \rstor_rcx
15387 - movq_cfi_restore 5*8, rcx
15388 + movq_cfi_restore RCX, rcx
15389 .endif
15390
15391 .if \rstor_rdx
15392 - movq_cfi_restore 6*8, rdx
15393 + movq_cfi_restore RDX, rdx
15394 .endif
15395
15396 - movq_cfi_restore 7*8, rsi
15397 - movq_cfi_restore 8*8, rdi
15398 + movq_cfi_restore RSI, rsi
15399 + movq_cfi_restore RDI, rdi
15400
15401 - .if ARG_SKIP+\addskip > 0
15402 - addq $ARG_SKIP+\addskip, %rsp
15403 - CFI_ADJUST_CFA_OFFSET -(ARG_SKIP+\addskip)
15404 + .if ORIG_RAX+\addskip > 0
15405 + addq $ORIG_RAX+\addskip, %rsp
15406 + CFI_ADJUST_CFA_OFFSET -(ORIG_RAX+\addskip)
15407 .endif
15408 .endm
15409
15410 - .macro LOAD_ARGS offset, skiprax=0
15411 - movq \offset(%rsp), %r11
15412 - movq \offset+8(%rsp), %r10
15413 - movq \offset+16(%rsp), %r9
15414 - movq \offset+24(%rsp), %r8
15415 - movq \offset+40(%rsp), %rcx
15416 - movq \offset+48(%rsp), %rdx
15417 - movq \offset+56(%rsp), %rsi
15418 - movq \offset+64(%rsp), %rdi
15419 + .macro LOAD_ARGS skiprax=0
15420 + movq R11(%rsp), %r11
15421 + movq R10(%rsp), %r10
15422 + movq R9(%rsp), %r9
15423 + movq R8(%rsp), %r8
15424 + movq RCX(%rsp), %rcx
15425 + movq RDX(%rsp), %rdx
15426 + movq RSI(%rsp), %rsi
15427 + movq RDI(%rsp), %rdi
15428 .if \skiprax
15429 .else
15430 - movq \offset+72(%rsp), %rax
15431 + movq RAX(%rsp), %rax
15432 .endif
15433 .endm
15434
15435 -#define REST_SKIP (6*8)
15436 -
15437 .macro SAVE_REST
15438 - subq $REST_SKIP, %rsp
15439 - CFI_ADJUST_CFA_OFFSET REST_SKIP
15440 - movq_cfi rbx, 5*8
15441 - movq_cfi rbp, 4*8
15442 - movq_cfi r12, 3*8
15443 - movq_cfi r13, 2*8
15444 - movq_cfi r14, 1*8
15445 - movq_cfi r15, 0*8
15446 + movq_cfi rbx, RBX
15447 + movq_cfi rbp, RBP
15448 +
15449 +#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15450 + movq_cfi r12, R12
15451 +#endif
15452 +
15453 + movq_cfi r13, R13
15454 + movq_cfi r14, R14
15455 + movq_cfi r15, R15
15456 .endm
15457
15458 .macro RESTORE_REST
15459 - movq_cfi_restore 0*8, r15
15460 - movq_cfi_restore 1*8, r14
15461 - movq_cfi_restore 2*8, r13
15462 - movq_cfi_restore 3*8, r12
15463 - movq_cfi_restore 4*8, rbp
15464 - movq_cfi_restore 5*8, rbx
15465 - addq $REST_SKIP, %rsp
15466 - CFI_ADJUST_CFA_OFFSET -(REST_SKIP)
15467 + movq_cfi_restore R15, r15
15468 + movq_cfi_restore R14, r14
15469 + movq_cfi_restore R13, r13
15470 +
15471 +#ifndef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
15472 + movq_cfi_restore R12, r12
15473 +#endif
15474 +
15475 + movq_cfi_restore RBP, rbp
15476 + movq_cfi_restore RBX, rbx
15477 .endm
15478
15479 .macro SAVE_ALL
15480 diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
15481 index f50de69..2b0a458 100644
15482 --- a/arch/x86/include/asm/checksum_32.h
15483 +++ b/arch/x86/include/asm/checksum_32.h
15484 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
15485 int len, __wsum sum,
15486 int *src_err_ptr, int *dst_err_ptr);
15487
15488 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
15489 + int len, __wsum sum,
15490 + int *src_err_ptr, int *dst_err_ptr);
15491 +
15492 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
15493 + int len, __wsum sum,
15494 + int *src_err_ptr, int *dst_err_ptr);
15495 +
15496 /*
15497 * Note: when you get a NULL pointer exception here this means someone
15498 * passed in an incorrect kernel address to one of these functions.
15499 @@ -53,7 +61,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
15500
15501 might_sleep();
15502 stac();
15503 - ret = csum_partial_copy_generic((__force void *)src, dst,
15504 + ret = csum_partial_copy_generic_from_user((__force void *)src, dst,
15505 len, sum, err_ptr, NULL);
15506 clac();
15507
15508 @@ -187,7 +195,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
15509 might_sleep();
15510 if (access_ok(VERIFY_WRITE, dst, len)) {
15511 stac();
15512 - ret = csum_partial_copy_generic(src, (__force void *)dst,
15513 + ret = csum_partial_copy_generic_to_user(src, (__force void *)dst,
15514 len, sum, NULL, err_ptr);
15515 clac();
15516 return ret;
15517 diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
15518 index d47786a..ce1b05d 100644
15519 --- a/arch/x86/include/asm/cmpxchg.h
15520 +++ b/arch/x86/include/asm/cmpxchg.h
15521 @@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
15522 __compiletime_error("Bad argument size for cmpxchg");
15523 extern void __xadd_wrong_size(void)
15524 __compiletime_error("Bad argument size for xadd");
15525 +extern void __xadd_check_overflow_wrong_size(void)
15526 + __compiletime_error("Bad argument size for xadd_check_overflow");
15527 extern void __add_wrong_size(void)
15528 __compiletime_error("Bad argument size for add");
15529 +extern void __add_check_overflow_wrong_size(void)
15530 + __compiletime_error("Bad argument size for add_check_overflow");
15531
15532 /*
15533 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
15534 @@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
15535 __ret; \
15536 })
15537
15538 +#define __xchg_op_check_overflow(ptr, arg, op, lock) \
15539 + ({ \
15540 + __typeof__ (*(ptr)) __ret = (arg); \
15541 + switch (sizeof(*(ptr))) { \
15542 + case __X86_CASE_L: \
15543 + asm volatile (lock #op "l %0, %1\n" \
15544 + "jno 0f\n" \
15545 + "mov %0,%1\n" \
15546 + "int $4\n0:\n" \
15547 + _ASM_EXTABLE(0b, 0b) \
15548 + : "+r" (__ret), "+m" (*(ptr)) \
15549 + : : "memory", "cc"); \
15550 + break; \
15551 + case __X86_CASE_Q: \
15552 + asm volatile (lock #op "q %q0, %1\n" \
15553 + "jno 0f\n" \
15554 + "mov %0,%1\n" \
15555 + "int $4\n0:\n" \
15556 + _ASM_EXTABLE(0b, 0b) \
15557 + : "+r" (__ret), "+m" (*(ptr)) \
15558 + : : "memory", "cc"); \
15559 + break; \
15560 + default: \
15561 + __ ## op ## _check_overflow_wrong_size(); \
15562 + } \
15563 + __ret; \
15564 + })
15565 +
15566 /*
15567 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
15568 * Since this is generally used to protect other memory information, we
15569 @@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
15570 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
15571 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
15572
15573 +#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
15574 +#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
15575 +
15576 #define __add(ptr, inc, lock) \
15577 ({ \
15578 __typeof__ (*(ptr)) __ret = (inc); \
15579 diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
15580 index 59c6c40..5e0b22c 100644
15581 --- a/arch/x86/include/asm/compat.h
15582 +++ b/arch/x86/include/asm/compat.h
15583 @@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
15584 typedef u32 compat_uint_t;
15585 typedef u32 compat_ulong_t;
15586 typedef u64 __attribute__((aligned(4))) compat_u64;
15587 -typedef u32 compat_uptr_t;
15588 +typedef u32 __user compat_uptr_t;
15589
15590 struct compat_timespec {
15591 compat_time_t tv_sec;
15592 diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
15593 index 89270b4..f0abf8e 100644
15594 --- a/arch/x86/include/asm/cpufeature.h
15595 +++ b/arch/x86/include/asm/cpufeature.h
15596 @@ -203,7 +203,7 @@
15597 #define X86_FEATURE_DECODEASSISTS (8*32+12) /* AMD Decode Assists support */
15598 #define X86_FEATURE_PAUSEFILTER (8*32+13) /* AMD filtered pause intercept */
15599 #define X86_FEATURE_PFTHRESHOLD (8*32+14) /* AMD pause filter threshold */
15600 -
15601 +#define X86_FEATURE_STRONGUDEREF (8*32+31) /* PaX PCID based strong UDEREF */
15602
15603 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
15604 #define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
15605 @@ -211,7 +211,7 @@
15606 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
15607 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
15608 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
15609 -#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
15610 +#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
15611 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
15612 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
15613 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
15614 @@ -353,6 +353,7 @@ extern const char * const x86_power_flags[32];
15615 #undef cpu_has_centaur_mcr
15616 #define cpu_has_centaur_mcr 0
15617
15618 +#define cpu_has_pcid boot_cpu_has(X86_FEATURE_PCID)
15619 #endif /* CONFIG_X86_64 */
15620
15621 #if __GNUC__ >= 4
15622 @@ -405,7 +406,8 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
15623
15624 #ifdef CONFIG_X86_DEBUG_STATIC_CPU_HAS
15625 t_warn:
15626 - warn_pre_alternatives();
15627 + if (bit != X86_FEATURE_PCID && bit != X86_FEATURE_INVPCID)
15628 + warn_pre_alternatives();
15629 return false;
15630 #endif
15631
15632 @@ -425,7 +427,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
15633 ".section .discard,\"aw\",@progbits\n"
15634 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
15635 ".previous\n"
15636 - ".section .altinstr_replacement,\"ax\"\n"
15637 + ".section .altinstr_replacement,\"a\"\n"
15638 "3: movb $1,%0\n"
15639 "4:\n"
15640 ".previous\n"
15641 @@ -462,7 +464,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
15642 " .byte 2b - 1b\n" /* src len */
15643 " .byte 4f - 3f\n" /* repl len */
15644 ".previous\n"
15645 - ".section .altinstr_replacement,\"ax\"\n"
15646 + ".section .altinstr_replacement,\"a\"\n"
15647 "3: .byte 0xe9\n .long %l[t_no] - 2b\n"
15648 "4:\n"
15649 ".previous\n"
15650 @@ -495,7 +497,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
15651 ".section .discard,\"aw\",@progbits\n"
15652 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
15653 ".previous\n"
15654 - ".section .altinstr_replacement,\"ax\"\n"
15655 + ".section .altinstr_replacement,\"a\"\n"
15656 "3: movb $0,%0\n"
15657 "4:\n"
15658 ".previous\n"
15659 @@ -509,7 +511,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
15660 ".section .discard,\"aw\",@progbits\n"
15661 " .byte 0xff + (6f-5f) - (4b-3b)\n" /* size check */
15662 ".previous\n"
15663 - ".section .altinstr_replacement,\"ax\"\n"
15664 + ".section .altinstr_replacement,\"a\"\n"
15665 "5: movb $1,%0\n"
15666 "6:\n"
15667 ".previous\n"
15668 diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
15669 index b90e5df..b462c91 100644
15670 --- a/arch/x86/include/asm/desc.h
15671 +++ b/arch/x86/include/asm/desc.h
15672 @@ -4,6 +4,7 @@
15673 #include <asm/desc_defs.h>
15674 #include <asm/ldt.h>
15675 #include <asm/mmu.h>
15676 +#include <asm/pgtable.h>
15677
15678 #include <linux/smp.h>
15679 #include <linux/percpu.h>
15680 @@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
15681
15682 desc->type = (info->read_exec_only ^ 1) << 1;
15683 desc->type |= info->contents << 2;
15684 + desc->type |= info->seg_not_present ^ 1;
15685
15686 desc->s = 1;
15687 desc->dpl = 0x3;
15688 @@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
15689 }
15690
15691 extern struct desc_ptr idt_descr;
15692 -extern gate_desc idt_table[];
15693 -extern struct desc_ptr debug_idt_descr;
15694 -extern gate_desc debug_idt_table[];
15695 -
15696 -struct gdt_page {
15697 - struct desc_struct gdt[GDT_ENTRIES];
15698 -} __attribute__((aligned(PAGE_SIZE)));
15699 -
15700 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
15701 +extern gate_desc idt_table[IDT_ENTRIES];
15702 +extern const struct desc_ptr debug_idt_descr;
15703 +extern gate_desc debug_idt_table[IDT_ENTRIES];
15704
15705 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
15706 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
15707 {
15708 - return per_cpu(gdt_page, cpu).gdt;
15709 + return cpu_gdt_table[cpu];
15710 }
15711
15712 #ifdef CONFIG_X86_64
15713 @@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
15714 unsigned long base, unsigned dpl, unsigned flags,
15715 unsigned short seg)
15716 {
15717 - gate->a = (seg << 16) | (base & 0xffff);
15718 - gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
15719 + gate->gate.offset_low = base;
15720 + gate->gate.seg = seg;
15721 + gate->gate.reserved = 0;
15722 + gate->gate.type = type;
15723 + gate->gate.s = 0;
15724 + gate->gate.dpl = dpl;
15725 + gate->gate.p = 1;
15726 + gate->gate.offset_high = base >> 16;
15727 }
15728
15729 #endif
15730 @@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
15731
15732 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
15733 {
15734 + pax_open_kernel();
15735 memcpy(&idt[entry], gate, sizeof(*gate));
15736 + pax_close_kernel();
15737 }
15738
15739 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
15740 {
15741 + pax_open_kernel();
15742 memcpy(&ldt[entry], desc, 8);
15743 + pax_close_kernel();
15744 }
15745
15746 static inline void
15747 @@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
15748 default: size = sizeof(*gdt); break;
15749 }
15750
15751 + pax_open_kernel();
15752 memcpy(&gdt[entry], desc, size);
15753 + pax_close_kernel();
15754 }
15755
15756 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
15757 @@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
15758
15759 static inline void native_load_tr_desc(void)
15760 {
15761 + pax_open_kernel();
15762 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
15763 + pax_close_kernel();
15764 }
15765
15766 static inline void native_load_gdt(const struct desc_ptr *dtr)
15767 @@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
15768 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
15769 unsigned int i;
15770
15771 + pax_open_kernel();
15772 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
15773 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
15774 + pax_close_kernel();
15775 }
15776
15777 #define _LDT_empty(info) \
15778 @@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
15779 preempt_enable();
15780 }
15781
15782 -static inline unsigned long get_desc_base(const struct desc_struct *desc)
15783 +static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
15784 {
15785 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
15786 }
15787 @@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
15788 }
15789
15790 #ifdef CONFIG_X86_64
15791 -static inline void set_nmi_gate(int gate, void *addr)
15792 +static inline void set_nmi_gate(int gate, const void *addr)
15793 {
15794 gate_desc s;
15795
15796 @@ -321,8 +334,8 @@ static inline void set_nmi_gate(int gate, void *addr)
15797 #endif
15798
15799 #ifdef CONFIG_TRACING
15800 -extern struct desc_ptr trace_idt_descr;
15801 -extern gate_desc trace_idt_table[];
15802 +extern const struct desc_ptr trace_idt_descr;
15803 +extern gate_desc trace_idt_table[IDT_ENTRIES];
15804 static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
15805 {
15806 write_idt_entry(trace_idt_table, entry, gate);
15807 @@ -333,7 +346,7 @@ static inline void write_trace_idt_entry(int entry, const gate_desc *gate)
15808 }
15809 #endif
15810
15811 -static inline void _set_gate(int gate, unsigned type, void *addr,
15812 +static inline void _set_gate(int gate, unsigned type, const void *addr,
15813 unsigned dpl, unsigned ist, unsigned seg)
15814 {
15815 gate_desc s;
15816 @@ -353,7 +366,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
15817 * Pentium F0 0F bugfix can have resulted in the mapped
15818 * IDT being write-protected.
15819 */
15820 -static inline void set_intr_gate(unsigned int n, void *addr)
15821 +static inline void set_intr_gate(unsigned int n, const void *addr)
15822 {
15823 BUG_ON((unsigned)n > 0xFF);
15824 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
15825 @@ -410,19 +423,19 @@ static inline void __alloc_intr_gate(unsigned int n, void *addr)
15826 /*
15827 * This routine sets up an interrupt gate at directory privilege level 3.
15828 */
15829 -static inline void set_system_intr_gate(unsigned int n, void *addr)
15830 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
15831 {
15832 BUG_ON((unsigned)n > 0xFF);
15833 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
15834 }
15835
15836 -static inline void set_system_trap_gate(unsigned int n, void *addr)
15837 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
15838 {
15839 BUG_ON((unsigned)n > 0xFF);
15840 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
15841 }
15842
15843 -static inline void set_trap_gate(unsigned int n, void *addr)
15844 +static inline void set_trap_gate(unsigned int n, const void *addr)
15845 {
15846 BUG_ON((unsigned)n > 0xFF);
15847 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
15848 @@ -431,16 +444,16 @@ static inline void set_trap_gate(unsigned int n, void *addr)
15849 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
15850 {
15851 BUG_ON((unsigned)n > 0xFF);
15852 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
15853 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
15854 }
15855
15856 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
15857 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
15858 {
15859 BUG_ON((unsigned)n > 0xFF);
15860 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
15861 }
15862
15863 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
15864 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
15865 {
15866 BUG_ON((unsigned)n > 0xFF);
15867 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
15868 @@ -512,4 +525,17 @@ static inline void load_current_idt(void)
15869 else
15870 load_idt((const struct desc_ptr *)&idt_descr);
15871 }
15872 +
15873 +#ifdef CONFIG_X86_32
15874 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
15875 +{
15876 + struct desc_struct d;
15877 +
15878 + if (likely(limit))
15879 + limit = (limit - 1UL) >> PAGE_SHIFT;
15880 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
15881 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
15882 +}
15883 +#endif
15884 +
15885 #endif /* _ASM_X86_DESC_H */
15886 diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
15887 index 278441f..b95a174 100644
15888 --- a/arch/x86/include/asm/desc_defs.h
15889 +++ b/arch/x86/include/asm/desc_defs.h
15890 @@ -31,6 +31,12 @@ struct desc_struct {
15891 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
15892 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
15893 };
15894 + struct {
15895 + u16 offset_low;
15896 + u16 seg;
15897 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
15898 + unsigned offset_high: 16;
15899 + } gate;
15900 };
15901 } __attribute__((packed));
15902
15903 diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
15904 index ced283a..ffe04cc 100644
15905 --- a/arch/x86/include/asm/div64.h
15906 +++ b/arch/x86/include/asm/div64.h
15907 @@ -39,7 +39,7 @@
15908 __mod; \
15909 })
15910
15911 -static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
15912 +static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
15913 {
15914 union {
15915 u64 v64;
15916 diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
15917 index 9c999c1..3860cb8 100644
15918 --- a/arch/x86/include/asm/elf.h
15919 +++ b/arch/x86/include/asm/elf.h
15920 @@ -243,7 +243,25 @@ extern int force_personality32;
15921 the loader. We need to make sure that it is out of the way of the program
15922 that it will "exec", and that there is sufficient room for the brk. */
15923
15924 +#ifdef CONFIG_PAX_SEGMEXEC
15925 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
15926 +#else
15927 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
15928 +#endif
15929 +
15930 +#ifdef CONFIG_PAX_ASLR
15931 +#ifdef CONFIG_X86_32
15932 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
15933 +
15934 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
15935 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
15936 +#else
15937 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
15938 +
15939 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
15940 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
15941 +#endif
15942 +#endif
15943
15944 /* This yields a mask that user programs can use to figure out what
15945 instruction set this CPU supports. This could be done in user space,
15946 @@ -296,16 +314,12 @@ do { \
15947
15948 #define ARCH_DLINFO \
15949 do { \
15950 - if (vdso_enabled) \
15951 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
15952 - (unsigned long)current->mm->context.vdso); \
15953 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
15954 } while (0)
15955
15956 #define ARCH_DLINFO_X32 \
15957 do { \
15958 - if (vdso_enabled) \
15959 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
15960 - (unsigned long)current->mm->context.vdso); \
15961 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
15962 } while (0)
15963
15964 #define AT_SYSINFO 32
15965 @@ -320,7 +334,7 @@ else \
15966
15967 #endif /* !CONFIG_X86_32 */
15968
15969 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
15970 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
15971
15972 #define VDSO_ENTRY \
15973 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
15974 @@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
15975 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
15976 #define compat_arch_setup_additional_pages syscall32_setup_pages
15977
15978 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
15979 -#define arch_randomize_brk arch_randomize_brk
15980 -
15981 /*
15982 * True on X86_32 or when emulating IA32 on X86_64
15983 */
15984 diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
15985 index 77a99ac..39ff7f5 100644
15986 --- a/arch/x86/include/asm/emergency-restart.h
15987 +++ b/arch/x86/include/asm/emergency-restart.h
15988 @@ -1,6 +1,6 @@
15989 #ifndef _ASM_X86_EMERGENCY_RESTART_H
15990 #define _ASM_X86_EMERGENCY_RESTART_H
15991
15992 -extern void machine_emergency_restart(void);
15993 +extern void machine_emergency_restart(void) __noreturn;
15994
15995 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
15996 diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
15997 index 4d0bda7..221da4d 100644
15998 --- a/arch/x86/include/asm/fpu-internal.h
15999 +++ b/arch/x86/include/asm/fpu-internal.h
16000 @@ -124,8 +124,11 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16001 #define user_insn(insn, output, input...) \
16002 ({ \
16003 int err; \
16004 + pax_open_userland(); \
16005 asm volatile(ASM_STAC "\n" \
16006 - "1:" #insn "\n\t" \
16007 + "1:" \
16008 + __copyuser_seg \
16009 + #insn "\n\t" \
16010 "2: " ASM_CLAC "\n" \
16011 ".section .fixup,\"ax\"\n" \
16012 "3: movl $-1,%[err]\n" \
16013 @@ -134,6 +137,7 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
16014 _ASM_EXTABLE(1b, 3b) \
16015 : [err] "=r" (err), output \
16016 : "0"(0), input); \
16017 + pax_close_userland(); \
16018 err; \
16019 })
16020
16021 @@ -298,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
16022 "emms\n\t" /* clear stack tags */
16023 "fildl %P[addr]", /* set F?P to defined value */
16024 X86_FEATURE_FXSAVE_LEAK,
16025 - [addr] "m" (tsk->thread.fpu.has_fpu));
16026 + [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
16027
16028 return fpu_restore_checking(&tsk->thread.fpu);
16029 }
16030 diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
16031 index be27ba1..04a8801 100644
16032 --- a/arch/x86/include/asm/futex.h
16033 +++ b/arch/x86/include/asm/futex.h
16034 @@ -12,6 +12,7 @@
16035 #include <asm/smap.h>
16036
16037 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
16038 + typecheck(u32 __user *, uaddr); \
16039 asm volatile("\t" ASM_STAC "\n" \
16040 "1:\t" insn "\n" \
16041 "2:\t" ASM_CLAC "\n" \
16042 @@ -20,15 +21,16 @@
16043 "\tjmp\t2b\n" \
16044 "\t.previous\n" \
16045 _ASM_EXTABLE(1b, 3b) \
16046 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
16047 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
16048 : "i" (-EFAULT), "0" (oparg), "1" (0))
16049
16050 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
16051 + typecheck(u32 __user *, uaddr); \
16052 asm volatile("\t" ASM_STAC "\n" \
16053 "1:\tmovl %2, %0\n" \
16054 "\tmovl\t%0, %3\n" \
16055 "\t" insn "\n" \
16056 - "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
16057 + "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
16058 "\tjnz\t1b\n" \
16059 "3:\t" ASM_CLAC "\n" \
16060 "\t.section .fixup,\"ax\"\n" \
16061 @@ -38,7 +40,7 @@
16062 _ASM_EXTABLE(1b, 4b) \
16063 _ASM_EXTABLE(2b, 4b) \
16064 : "=&a" (oldval), "=&r" (ret), \
16065 - "+m" (*uaddr), "=&r" (tem) \
16066 + "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
16067 : "r" (oparg), "i" (-EFAULT), "1" (0))
16068
16069 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16070 @@ -57,12 +59,13 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16071
16072 pagefault_disable();
16073
16074 + pax_open_userland();
16075 switch (op) {
16076 case FUTEX_OP_SET:
16077 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
16078 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
16079 break;
16080 case FUTEX_OP_ADD:
16081 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
16082 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
16083 uaddr, oparg);
16084 break;
16085 case FUTEX_OP_OR:
16086 @@ -77,6 +80,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
16087 default:
16088 ret = -ENOSYS;
16089 }
16090 + pax_close_userland();
16091
16092 pagefault_enable();
16093
16094 @@ -115,18 +119,20 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
16095 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
16096 return -EFAULT;
16097
16098 + pax_open_userland();
16099 asm volatile("\t" ASM_STAC "\n"
16100 - "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
16101 + "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
16102 "2:\t" ASM_CLAC "\n"
16103 "\t.section .fixup, \"ax\"\n"
16104 "3:\tmov %3, %0\n"
16105 "\tjmp 2b\n"
16106 "\t.previous\n"
16107 _ASM_EXTABLE(1b, 3b)
16108 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
16109 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
16110 : "i" (-EFAULT), "r" (newval), "1" (oldval)
16111 : "memory"
16112 );
16113 + pax_close_userland();
16114
16115 *uval = oldval;
16116 return ret;
16117 diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
16118 index 92b3bae..3866449 100644
16119 --- a/arch/x86/include/asm/hw_irq.h
16120 +++ b/arch/x86/include/asm/hw_irq.h
16121 @@ -165,8 +165,8 @@ extern void setup_ioapic_dest(void);
16122 extern void enable_IO_APIC(void);
16123
16124 /* Statistics */
16125 -extern atomic_t irq_err_count;
16126 -extern atomic_t irq_mis_count;
16127 +extern atomic_unchecked_t irq_err_count;
16128 +extern atomic_unchecked_t irq_mis_count;
16129
16130 /* EISA */
16131 extern void eisa_set_level_irq(unsigned int irq);
16132 diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
16133 index a203659..9889f1c 100644
16134 --- a/arch/x86/include/asm/i8259.h
16135 +++ b/arch/x86/include/asm/i8259.h
16136 @@ -62,7 +62,7 @@ struct legacy_pic {
16137 void (*init)(int auto_eoi);
16138 int (*irq_pending)(unsigned int irq);
16139 void (*make_irq)(unsigned int irq);
16140 -};
16141 +} __do_const;
16142
16143 extern struct legacy_pic *legacy_pic;
16144 extern struct legacy_pic null_legacy_pic;
16145 diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
16146 index 34f69cb..6d95446 100644
16147 --- a/arch/x86/include/asm/io.h
16148 +++ b/arch/x86/include/asm/io.h
16149 @@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
16150 "m" (*(volatile type __force *)addr) barrier); }
16151
16152 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
16153 -build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
16154 -build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
16155 +build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
16156 +build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
16157
16158 build_mmio_read(__readb, "b", unsigned char, "=q", )
16159 -build_mmio_read(__readw, "w", unsigned short, "=r", )
16160 -build_mmio_read(__readl, "l", unsigned int, "=r", )
16161 +build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
16162 +build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
16163
16164 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
16165 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
16166 @@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
16167 return ioremap_nocache(offset, size);
16168 }
16169
16170 -extern void iounmap(volatile void __iomem *addr);
16171 +extern void iounmap(const volatile void __iomem *addr);
16172
16173 extern void set_iounmap_nonlazy(void);
16174
16175 @@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
16176
16177 #include <linux/vmalloc.h>
16178
16179 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
16180 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
16181 +{
16182 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
16183 +}
16184 +
16185 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
16186 +{
16187 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
16188 +}
16189 +
16190 /*
16191 * Convert a virtual cached pointer to an uncached pointer
16192 */
16193 diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
16194 index bba3cf8..06bc8da 100644
16195 --- a/arch/x86/include/asm/irqflags.h
16196 +++ b/arch/x86/include/asm/irqflags.h
16197 @@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
16198 sti; \
16199 sysexit
16200
16201 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
16202 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
16203 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
16204 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
16205 +
16206 #else
16207 #define INTERRUPT_RETURN iret
16208 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
16209 diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
16210 index 9454c16..e4100e3 100644
16211 --- a/arch/x86/include/asm/kprobes.h
16212 +++ b/arch/x86/include/asm/kprobes.h
16213 @@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
16214 #define RELATIVEJUMP_SIZE 5
16215 #define RELATIVECALL_OPCODE 0xe8
16216 #define RELATIVE_ADDR_SIZE 4
16217 -#define MAX_STACK_SIZE 64
16218 -#define MIN_STACK_SIZE(ADDR) \
16219 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
16220 - THREAD_SIZE - (unsigned long)(ADDR))) \
16221 - ? (MAX_STACK_SIZE) \
16222 - : (((unsigned long)current_thread_info()) + \
16223 - THREAD_SIZE - (unsigned long)(ADDR)))
16224 +#define MAX_STACK_SIZE 64UL
16225 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
16226
16227 #define flush_insn_slot(p) do { } while (0)
16228
16229 diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
16230 index 2d89e39..baee879 100644
16231 --- a/arch/x86/include/asm/local.h
16232 +++ b/arch/x86/include/asm/local.h
16233 @@ -10,33 +10,97 @@ typedef struct {
16234 atomic_long_t a;
16235 } local_t;
16236
16237 +typedef struct {
16238 + atomic_long_unchecked_t a;
16239 +} local_unchecked_t;
16240 +
16241 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
16242
16243 #define local_read(l) atomic_long_read(&(l)->a)
16244 +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
16245 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
16246 +#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
16247
16248 static inline void local_inc(local_t *l)
16249 {
16250 - asm volatile(_ASM_INC "%0"
16251 + asm volatile(_ASM_INC "%0\n"
16252 +
16253 +#ifdef CONFIG_PAX_REFCOUNT
16254 + "jno 0f\n"
16255 + _ASM_DEC "%0\n"
16256 + "int $4\n0:\n"
16257 + _ASM_EXTABLE(0b, 0b)
16258 +#endif
16259 +
16260 + : "+m" (l->a.counter));
16261 +}
16262 +
16263 +static inline void local_inc_unchecked(local_unchecked_t *l)
16264 +{
16265 + asm volatile(_ASM_INC "%0\n"
16266 : "+m" (l->a.counter));
16267 }
16268
16269 static inline void local_dec(local_t *l)
16270 {
16271 - asm volatile(_ASM_DEC "%0"
16272 + asm volatile(_ASM_DEC "%0\n"
16273 +
16274 +#ifdef CONFIG_PAX_REFCOUNT
16275 + "jno 0f\n"
16276 + _ASM_INC "%0\n"
16277 + "int $4\n0:\n"
16278 + _ASM_EXTABLE(0b, 0b)
16279 +#endif
16280 +
16281 + : "+m" (l->a.counter));
16282 +}
16283 +
16284 +static inline void local_dec_unchecked(local_unchecked_t *l)
16285 +{
16286 + asm volatile(_ASM_DEC "%0\n"
16287 : "+m" (l->a.counter));
16288 }
16289
16290 static inline void local_add(long i, local_t *l)
16291 {
16292 - asm volatile(_ASM_ADD "%1,%0"
16293 + asm volatile(_ASM_ADD "%1,%0\n"
16294 +
16295 +#ifdef CONFIG_PAX_REFCOUNT
16296 + "jno 0f\n"
16297 + _ASM_SUB "%1,%0\n"
16298 + "int $4\n0:\n"
16299 + _ASM_EXTABLE(0b, 0b)
16300 +#endif
16301 +
16302 + : "+m" (l->a.counter)
16303 + : "ir" (i));
16304 +}
16305 +
16306 +static inline void local_add_unchecked(long i, local_unchecked_t *l)
16307 +{
16308 + asm volatile(_ASM_ADD "%1,%0\n"
16309 : "+m" (l->a.counter)
16310 : "ir" (i));
16311 }
16312
16313 static inline void local_sub(long i, local_t *l)
16314 {
16315 - asm volatile(_ASM_SUB "%1,%0"
16316 + asm volatile(_ASM_SUB "%1,%0\n"
16317 +
16318 +#ifdef CONFIG_PAX_REFCOUNT
16319 + "jno 0f\n"
16320 + _ASM_ADD "%1,%0\n"
16321 + "int $4\n0:\n"
16322 + _ASM_EXTABLE(0b, 0b)
16323 +#endif
16324 +
16325 + : "+m" (l->a.counter)
16326 + : "ir" (i));
16327 +}
16328 +
16329 +static inline void local_sub_unchecked(long i, local_unchecked_t *l)
16330 +{
16331 + asm volatile(_ASM_SUB "%1,%0\n"
16332 : "+m" (l->a.counter)
16333 : "ir" (i));
16334 }
16335 @@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
16336 {
16337 unsigned char c;
16338
16339 - asm volatile(_ASM_SUB "%2,%0; sete %1"
16340 + asm volatile(_ASM_SUB "%2,%0\n"
16341 +
16342 +#ifdef CONFIG_PAX_REFCOUNT
16343 + "jno 0f\n"
16344 + _ASM_ADD "%2,%0\n"
16345 + "int $4\n0:\n"
16346 + _ASM_EXTABLE(0b, 0b)
16347 +#endif
16348 +
16349 + "sete %1\n"
16350 : "+m" (l->a.counter), "=qm" (c)
16351 : "ir" (i) : "memory");
16352 return c;
16353 @@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
16354 {
16355 unsigned char c;
16356
16357 - asm volatile(_ASM_DEC "%0; sete %1"
16358 + asm volatile(_ASM_DEC "%0\n"
16359 +
16360 +#ifdef CONFIG_PAX_REFCOUNT
16361 + "jno 0f\n"
16362 + _ASM_INC "%0\n"
16363 + "int $4\n0:\n"
16364 + _ASM_EXTABLE(0b, 0b)
16365 +#endif
16366 +
16367 + "sete %1\n"
16368 : "+m" (l->a.counter), "=qm" (c)
16369 : : "memory");
16370 return c != 0;
16371 @@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
16372 {
16373 unsigned char c;
16374
16375 - asm volatile(_ASM_INC "%0; sete %1"
16376 + asm volatile(_ASM_INC "%0\n"
16377 +
16378 +#ifdef CONFIG_PAX_REFCOUNT
16379 + "jno 0f\n"
16380 + _ASM_DEC "%0\n"
16381 + "int $4\n0:\n"
16382 + _ASM_EXTABLE(0b, 0b)
16383 +#endif
16384 +
16385 + "sete %1\n"
16386 : "+m" (l->a.counter), "=qm" (c)
16387 : : "memory");
16388 return c != 0;
16389 @@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
16390 {
16391 unsigned char c;
16392
16393 - asm volatile(_ASM_ADD "%2,%0; sets %1"
16394 + asm volatile(_ASM_ADD "%2,%0\n"
16395 +
16396 +#ifdef CONFIG_PAX_REFCOUNT
16397 + "jno 0f\n"
16398 + _ASM_SUB "%2,%0\n"
16399 + "int $4\n0:\n"
16400 + _ASM_EXTABLE(0b, 0b)
16401 +#endif
16402 +
16403 + "sets %1\n"
16404 : "+m" (l->a.counter), "=qm" (c)
16405 : "ir" (i) : "memory");
16406 return c;
16407 @@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
16408 static inline long local_add_return(long i, local_t *l)
16409 {
16410 long __i = i;
16411 + asm volatile(_ASM_XADD "%0, %1\n"
16412 +
16413 +#ifdef CONFIG_PAX_REFCOUNT
16414 + "jno 0f\n"
16415 + _ASM_MOV "%0,%1\n"
16416 + "int $4\n0:\n"
16417 + _ASM_EXTABLE(0b, 0b)
16418 +#endif
16419 +
16420 + : "+r" (i), "+m" (l->a.counter)
16421 + : : "memory");
16422 + return i + __i;
16423 +}
16424 +
16425 +/**
16426 + * local_add_return_unchecked - add and return
16427 + * @i: integer value to add
16428 + * @l: pointer to type local_unchecked_t
16429 + *
16430 + * Atomically adds @i to @l and returns @i + @l
16431 + */
16432 +static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
16433 +{
16434 + long __i = i;
16435 asm volatile(_ASM_XADD "%0, %1;"
16436 : "+r" (i), "+m" (l->a.counter)
16437 : : "memory");
16438 @@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
16439
16440 #define local_cmpxchg(l, o, n) \
16441 (cmpxchg_local(&((l)->a.counter), (o), (n)))
16442 +#define local_cmpxchg_unchecked(l, o, n) \
16443 + (cmpxchg_local(&((l)->a.counter), (o), (n)))
16444 /* Always has a lock prefix */
16445 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
16446
16447 diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
16448 new file mode 100644
16449 index 0000000..2bfd3ba
16450 --- /dev/null
16451 +++ b/arch/x86/include/asm/mman.h
16452 @@ -0,0 +1,15 @@
16453 +#ifndef _X86_MMAN_H
16454 +#define _X86_MMAN_H
16455 +
16456 +#include <uapi/asm/mman.h>
16457 +
16458 +#ifdef __KERNEL__
16459 +#ifndef __ASSEMBLY__
16460 +#ifdef CONFIG_X86_32
16461 +#define arch_mmap_check i386_mmap_check
16462 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
16463 +#endif
16464 +#endif
16465 +#endif
16466 +
16467 +#endif /* X86_MMAN_H */
16468 diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
16469 index 5f55e69..e20bfb1 100644
16470 --- a/arch/x86/include/asm/mmu.h
16471 +++ b/arch/x86/include/asm/mmu.h
16472 @@ -9,7 +9,7 @@
16473 * we put the segment information here.
16474 */
16475 typedef struct {
16476 - void *ldt;
16477 + struct desc_struct *ldt;
16478 int size;
16479
16480 #ifdef CONFIG_X86_64
16481 @@ -18,7 +18,19 @@ typedef struct {
16482 #endif
16483
16484 struct mutex lock;
16485 - void *vdso;
16486 + unsigned long vdso;
16487 +
16488 +#ifdef CONFIG_X86_32
16489 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
16490 + unsigned long user_cs_base;
16491 + unsigned long user_cs_limit;
16492 +
16493 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
16494 + cpumask_t cpu_user_cs_mask;
16495 +#endif
16496 +
16497 +#endif
16498 +#endif
16499 } mm_context_t;
16500
16501 #ifdef CONFIG_SMP
16502 diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
16503 index be12c53..4d24039 100644
16504 --- a/arch/x86/include/asm/mmu_context.h
16505 +++ b/arch/x86/include/asm/mmu_context.h
16506 @@ -24,6 +24,20 @@ void destroy_context(struct mm_struct *mm);
16507
16508 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
16509 {
16510 +
16511 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16512 + if (!(static_cpu_has(X86_FEATURE_PCID))) {
16513 + unsigned int i;
16514 + pgd_t *pgd;
16515 +
16516 + pax_open_kernel();
16517 + pgd = get_cpu_pgd(smp_processor_id(), kernel);
16518 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
16519 + set_pgd_batched(pgd+i, native_make_pgd(0));
16520 + pax_close_kernel();
16521 + }
16522 +#endif
16523 +
16524 #ifdef CONFIG_SMP
16525 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
16526 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
16527 @@ -34,16 +48,59 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
16528 struct task_struct *tsk)
16529 {
16530 unsigned cpu = smp_processor_id();
16531 +#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
16532 + int tlbstate = TLBSTATE_OK;
16533 +#endif
16534
16535 if (likely(prev != next)) {
16536 #ifdef CONFIG_SMP
16537 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
16538 + tlbstate = this_cpu_read(cpu_tlbstate.state);
16539 +#endif
16540 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
16541 this_cpu_write(cpu_tlbstate.active_mm, next);
16542 #endif
16543 cpumask_set_cpu(cpu, mm_cpumask(next));
16544
16545 /* Re-load page tables */
16546 +#ifdef CONFIG_PAX_PER_CPU_PGD
16547 + pax_open_kernel();
16548 +
16549 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16550 + if (static_cpu_has(X86_FEATURE_PCID))
16551 + __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
16552 + else
16553 +#endif
16554 +
16555 + __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
16556 + __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
16557 + pax_close_kernel();
16558 + BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
16559 +
16560 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16561 + if (static_cpu_has(X86_FEATURE_PCID)) {
16562 + if (static_cpu_has(X86_FEATURE_INVPCID)) {
16563 + u64 descriptor[2];
16564 + descriptor[0] = PCID_USER;
16565 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
16566 + if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
16567 + descriptor[0] = PCID_KERNEL;
16568 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
16569 + }
16570 + } else {
16571 + write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
16572 + if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
16573 + write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
16574 + else
16575 + write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
16576 + }
16577 + } else
16578 +#endif
16579 +
16580 + load_cr3(get_cpu_pgd(cpu, kernel));
16581 +#else
16582 load_cr3(next->pgd);
16583 +#endif
16584
16585 /* Stop flush ipis for the previous mm */
16586 cpumask_clear_cpu(cpu, mm_cpumask(prev));
16587 @@ -51,9 +108,67 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
16588 /* Load the LDT, if the LDT is different: */
16589 if (unlikely(prev->context.ldt != next->context.ldt))
16590 load_LDT_nolock(&next->context);
16591 +
16592 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
16593 + if (!(__supported_pte_mask & _PAGE_NX)) {
16594 + smp_mb__before_clear_bit();
16595 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
16596 + smp_mb__after_clear_bit();
16597 + cpu_set(cpu, next->context.cpu_user_cs_mask);
16598 + }
16599 +#endif
16600 +
16601 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
16602 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
16603 + prev->context.user_cs_limit != next->context.user_cs_limit))
16604 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
16605 +#ifdef CONFIG_SMP
16606 + else if (unlikely(tlbstate != TLBSTATE_OK))
16607 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
16608 +#endif
16609 +#endif
16610 +
16611 }
16612 + else {
16613 +
16614 +#ifdef CONFIG_PAX_PER_CPU_PGD
16615 + pax_open_kernel();
16616 +
16617 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16618 + if (static_cpu_has(X86_FEATURE_PCID))
16619 + __clone_user_pgds(get_cpu_pgd(cpu, user), next->pgd);
16620 + else
16621 +#endif
16622 +
16623 + __clone_user_pgds(get_cpu_pgd(cpu, kernel), next->pgd);
16624 + __shadow_user_pgds(get_cpu_pgd(cpu, kernel) + USER_PGD_PTRS, next->pgd);
16625 + pax_close_kernel();
16626 + BUG_ON((__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL) != (read_cr3() & __PHYSICAL_MASK) && (__pa(get_cpu_pgd(cpu, user)) | PCID_USER) != (read_cr3() & __PHYSICAL_MASK));
16627 +
16628 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
16629 + if (static_cpu_has(X86_FEATURE_PCID)) {
16630 + if (static_cpu_has(X86_FEATURE_INVPCID)) {
16631 + u64 descriptor[2];
16632 + descriptor[0] = PCID_USER;
16633 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
16634 + if (!static_cpu_has(X86_FEATURE_STRONGUDEREF)) {
16635 + descriptor[0] = PCID_KERNEL;
16636 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_CONTEXT) : "memory");
16637 + }
16638 + } else {
16639 + write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
16640 + if (static_cpu_has(X86_FEATURE_STRONGUDEREF))
16641 + write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
16642 + else
16643 + write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
16644 + }
16645 + } else
16646 +#endif
16647 +
16648 + load_cr3(get_cpu_pgd(cpu, kernel));
16649 +#endif
16650 +
16651 #ifdef CONFIG_SMP
16652 - else {
16653 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
16654 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
16655
16656 @@ -70,11 +185,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
16657 * tlb flush IPI delivery. We must reload CR3
16658 * to make sure to use no freed page tables.
16659 */
16660 +
16661 +#ifndef CONFIG_PAX_PER_CPU_PGD
16662 load_cr3(next->pgd);
16663 +#endif
16664 +
16665 load_LDT_nolock(&next->context);
16666 +
16667 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
16668 + if (!(__supported_pte_mask & _PAGE_NX))
16669 + cpu_set(cpu, next->context.cpu_user_cs_mask);
16670 +#endif
16671 +
16672 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
16673 +#ifdef CONFIG_PAX_PAGEEXEC
16674 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
16675 +#endif
16676 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
16677 +#endif
16678 +
16679 }
16680 +#endif
16681 }
16682 -#endif
16683 }
16684
16685 #define activate_mm(prev, next) \
16686 diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
16687 index e3b7819..b257c64 100644
16688 --- a/arch/x86/include/asm/module.h
16689 +++ b/arch/x86/include/asm/module.h
16690 @@ -5,6 +5,7 @@
16691
16692 #ifdef CONFIG_X86_64
16693 /* X86_64 does not define MODULE_PROC_FAMILY */
16694 +#define MODULE_PROC_FAMILY ""
16695 #elif defined CONFIG_M486
16696 #define MODULE_PROC_FAMILY "486 "
16697 #elif defined CONFIG_M586
16698 @@ -57,8 +58,20 @@
16699 #error unknown processor family
16700 #endif
16701
16702 -#ifdef CONFIG_X86_32
16703 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
16704 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
16705 +#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
16706 +#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
16707 +#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
16708 +#else
16709 +#define MODULE_PAX_KERNEXEC ""
16710 #endif
16711
16712 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16713 +#define MODULE_PAX_UDEREF "UDEREF "
16714 +#else
16715 +#define MODULE_PAX_UDEREF ""
16716 +#endif
16717 +
16718 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
16719 +
16720 #endif /* _ASM_X86_MODULE_H */
16721 diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
16722 index 86f9301..b365cda 100644
16723 --- a/arch/x86/include/asm/nmi.h
16724 +++ b/arch/x86/include/asm/nmi.h
16725 @@ -40,11 +40,11 @@ struct nmiaction {
16726 nmi_handler_t handler;
16727 unsigned long flags;
16728 const char *name;
16729 -};
16730 +} __do_const;
16731
16732 #define register_nmi_handler(t, fn, fg, n, init...) \
16733 ({ \
16734 - static struct nmiaction init fn##_na = { \
16735 + static const struct nmiaction init fn##_na = { \
16736 .handler = (fn), \
16737 .name = (n), \
16738 .flags = (fg), \
16739 @@ -52,7 +52,7 @@ struct nmiaction {
16740 __register_nmi_handler((t), &fn##_na); \
16741 })
16742
16743 -int __register_nmi_handler(unsigned int, struct nmiaction *);
16744 +int __register_nmi_handler(unsigned int, const struct nmiaction *);
16745
16746 void unregister_nmi_handler(unsigned int, const char *);
16747
16748 diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
16749 index c878924..21f4889 100644
16750 --- a/arch/x86/include/asm/page.h
16751 +++ b/arch/x86/include/asm/page.h
16752 @@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
16753 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
16754
16755 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
16756 +#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
16757
16758 #define __boot_va(x) __va(x)
16759 #define __boot_pa(x) __pa(x)
16760 diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
16761 index 0f1ddee..e2fc3d1 100644
16762 --- a/arch/x86/include/asm/page_64.h
16763 +++ b/arch/x86/include/asm/page_64.h
16764 @@ -7,9 +7,9 @@
16765
16766 /* duplicated to the one in bootmem.h */
16767 extern unsigned long max_pfn;
16768 -extern unsigned long phys_base;
16769 +extern const unsigned long phys_base;
16770
16771 -static inline unsigned long __phys_addr_nodebug(unsigned long x)
16772 +static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
16773 {
16774 unsigned long y = x - __START_KERNEL_map;
16775
16776 diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
16777 index 401f350..dee5d13 100644
16778 --- a/arch/x86/include/asm/paravirt.h
16779 +++ b/arch/x86/include/asm/paravirt.h
16780 @@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
16781 return (pmd_t) { ret };
16782 }
16783
16784 -static inline pmdval_t pmd_val(pmd_t pmd)
16785 +static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
16786 {
16787 pmdval_t ret;
16788
16789 @@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
16790 val);
16791 }
16792
16793 +static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
16794 +{
16795 + pgdval_t val = native_pgd_val(pgd);
16796 +
16797 + if (sizeof(pgdval_t) > sizeof(long))
16798 + PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
16799 + val, (u64)val >> 32);
16800 + else
16801 + PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
16802 + val);
16803 +}
16804 +
16805 static inline void pgd_clear(pgd_t *pgdp)
16806 {
16807 set_pgd(pgdp, __pgd(0));
16808 @@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
16809 pv_mmu_ops.set_fixmap(idx, phys, flags);
16810 }
16811
16812 +#ifdef CONFIG_PAX_KERNEXEC
16813 +static inline unsigned long pax_open_kernel(void)
16814 +{
16815 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
16816 +}
16817 +
16818 +static inline unsigned long pax_close_kernel(void)
16819 +{
16820 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
16821 +}
16822 +#else
16823 +static inline unsigned long pax_open_kernel(void) { return 0; }
16824 +static inline unsigned long pax_close_kernel(void) { return 0; }
16825 +#endif
16826 +
16827 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
16828
16829 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
16830 @@ -906,7 +933,7 @@ extern void default_banner(void);
16831
16832 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
16833 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
16834 -#define PARA_INDIRECT(addr) *%cs:addr
16835 +#define PARA_INDIRECT(addr) *%ss:addr
16836 #endif
16837
16838 #define INTERRUPT_RETURN \
16839 @@ -981,6 +1008,21 @@ extern void default_banner(void);
16840 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
16841 CLBR_NONE, \
16842 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
16843 +
16844 +#define GET_CR0_INTO_RDI \
16845 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
16846 + mov %rax,%rdi
16847 +
16848 +#define SET_RDI_INTO_CR0 \
16849 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
16850 +
16851 +#define GET_CR3_INTO_RDI \
16852 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
16853 + mov %rax,%rdi
16854 +
16855 +#define SET_RDI_INTO_CR3 \
16856 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
16857 +
16858 #endif /* CONFIG_X86_32 */
16859
16860 #endif /* __ASSEMBLY__ */
16861 diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
16862 index aab8f67..2531748 100644
16863 --- a/arch/x86/include/asm/paravirt_types.h
16864 +++ b/arch/x86/include/asm/paravirt_types.h
16865 @@ -84,7 +84,7 @@ struct pv_init_ops {
16866 */
16867 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
16868 unsigned long addr, unsigned len);
16869 -};
16870 +} __no_const;
16871
16872
16873 struct pv_lazy_ops {
16874 @@ -98,7 +98,7 @@ struct pv_time_ops {
16875 unsigned long long (*sched_clock)(void);
16876 unsigned long long (*steal_clock)(int cpu);
16877 unsigned long (*get_tsc_khz)(void);
16878 -};
16879 +} __no_const;
16880
16881 struct pv_cpu_ops {
16882 /* hooks for various privileged instructions */
16883 @@ -192,7 +192,7 @@ struct pv_cpu_ops {
16884
16885 void (*start_context_switch)(struct task_struct *prev);
16886 void (*end_context_switch)(struct task_struct *next);
16887 -};
16888 +} __no_const;
16889
16890 struct pv_irq_ops {
16891 /*
16892 @@ -223,7 +223,7 @@ struct pv_apic_ops {
16893 unsigned long start_eip,
16894 unsigned long start_esp);
16895 #endif
16896 -};
16897 +} __no_const;
16898
16899 struct pv_mmu_ops {
16900 unsigned long (*read_cr2)(void);
16901 @@ -313,6 +313,7 @@ struct pv_mmu_ops {
16902 struct paravirt_callee_save make_pud;
16903
16904 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
16905 + void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
16906 #endif /* PAGETABLE_LEVELS == 4 */
16907 #endif /* PAGETABLE_LEVELS >= 3 */
16908
16909 @@ -324,6 +325,12 @@ struct pv_mmu_ops {
16910 an mfn. We can tell which is which from the index. */
16911 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
16912 phys_addr_t phys, pgprot_t flags);
16913 +
16914 +#ifdef CONFIG_PAX_KERNEXEC
16915 + unsigned long (*pax_open_kernel)(void);
16916 + unsigned long (*pax_close_kernel)(void);
16917 +#endif
16918 +
16919 };
16920
16921 struct arch_spinlock;
16922 diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
16923 index b4389a4..7024269 100644
16924 --- a/arch/x86/include/asm/pgalloc.h
16925 +++ b/arch/x86/include/asm/pgalloc.h
16926 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
16927 pmd_t *pmd, pte_t *pte)
16928 {
16929 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
16930 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
16931 +}
16932 +
16933 +static inline void pmd_populate_user(struct mm_struct *mm,
16934 + pmd_t *pmd, pte_t *pte)
16935 +{
16936 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
16937 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
16938 }
16939
16940 @@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
16941
16942 #ifdef CONFIG_X86_PAE
16943 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
16944 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
16945 +{
16946 + pud_populate(mm, pudp, pmd);
16947 +}
16948 #else /* !CONFIG_X86_PAE */
16949 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
16950 {
16951 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
16952 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
16953 }
16954 +
16955 +static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
16956 +{
16957 + paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
16958 + set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
16959 +}
16960 #endif /* CONFIG_X86_PAE */
16961
16962 #if PAGETABLE_LEVELS > 3
16963 @@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
16964 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
16965 }
16966
16967 +static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
16968 +{
16969 + paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
16970 + set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
16971 +}
16972 +
16973 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
16974 {
16975 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
16976 diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
16977 index 3bf2dd0..23d2a9f 100644
16978 --- a/arch/x86/include/asm/pgtable-2level.h
16979 +++ b/arch/x86/include/asm/pgtable-2level.h
16980 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
16981
16982 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
16983 {
16984 + pax_open_kernel();
16985 *pmdp = pmd;
16986 + pax_close_kernel();
16987 }
16988
16989 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
16990 diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
16991 index 81bb91b..9392125 100644
16992 --- a/arch/x86/include/asm/pgtable-3level.h
16993 +++ b/arch/x86/include/asm/pgtable-3level.h
16994 @@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
16995
16996 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
16997 {
16998 + pax_open_kernel();
16999 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
17000 + pax_close_kernel();
17001 }
17002
17003 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17004 {
17005 + pax_open_kernel();
17006 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
17007 + pax_close_kernel();
17008 }
17009
17010 /*
17011 diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
17012 index 3d19994..732a48c 100644
17013 --- a/arch/x86/include/asm/pgtable.h
17014 +++ b/arch/x86/include/asm/pgtable.h
17015 @@ -45,6 +45,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17016
17017 #ifndef __PAGETABLE_PUD_FOLDED
17018 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
17019 +#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
17020 #define pgd_clear(pgd) native_pgd_clear(pgd)
17021 #endif
17022
17023 @@ -82,12 +83,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
17024
17025 #define arch_end_context_switch(prev) do {} while(0)
17026
17027 +#define pax_open_kernel() native_pax_open_kernel()
17028 +#define pax_close_kernel() native_pax_close_kernel()
17029 #endif /* CONFIG_PARAVIRT */
17030
17031 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
17032 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
17033 +
17034 +#ifdef CONFIG_PAX_KERNEXEC
17035 +static inline unsigned long native_pax_open_kernel(void)
17036 +{
17037 + unsigned long cr0;
17038 +
17039 + preempt_disable();
17040 + barrier();
17041 + cr0 = read_cr0() ^ X86_CR0_WP;
17042 + BUG_ON(cr0 & X86_CR0_WP);
17043 + write_cr0(cr0);
17044 + return cr0 ^ X86_CR0_WP;
17045 +}
17046 +
17047 +static inline unsigned long native_pax_close_kernel(void)
17048 +{
17049 + unsigned long cr0;
17050 +
17051 + cr0 = read_cr0() ^ X86_CR0_WP;
17052 + BUG_ON(!(cr0 & X86_CR0_WP));
17053 + write_cr0(cr0);
17054 + barrier();
17055 + preempt_enable_no_resched();
17056 + return cr0 ^ X86_CR0_WP;
17057 +}
17058 +#else
17059 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
17060 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
17061 +#endif
17062 +
17063 /*
17064 * The following only work if pte_present() is true.
17065 * Undefined behaviour if not..
17066 */
17067 +static inline int pte_user(pte_t pte)
17068 +{
17069 + return pte_val(pte) & _PAGE_USER;
17070 +}
17071 +
17072 static inline int pte_dirty(pte_t pte)
17073 {
17074 return pte_flags(pte) & _PAGE_DIRTY;
17075 @@ -148,6 +188,11 @@ static inline unsigned long pud_pfn(pud_t pud)
17076 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
17077 }
17078
17079 +static inline unsigned long pgd_pfn(pgd_t pgd)
17080 +{
17081 + return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
17082 +}
17083 +
17084 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
17085
17086 static inline int pmd_large(pmd_t pte)
17087 @@ -201,9 +246,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
17088 return pte_clear_flags(pte, _PAGE_RW);
17089 }
17090
17091 +static inline pte_t pte_mkread(pte_t pte)
17092 +{
17093 + return __pte(pte_val(pte) | _PAGE_USER);
17094 +}
17095 +
17096 static inline pte_t pte_mkexec(pte_t pte)
17097 {
17098 - return pte_clear_flags(pte, _PAGE_NX);
17099 +#ifdef CONFIG_X86_PAE
17100 + if (__supported_pte_mask & _PAGE_NX)
17101 + return pte_clear_flags(pte, _PAGE_NX);
17102 + else
17103 +#endif
17104 + return pte_set_flags(pte, _PAGE_USER);
17105 +}
17106 +
17107 +static inline pte_t pte_exprotect(pte_t pte)
17108 +{
17109 +#ifdef CONFIG_X86_PAE
17110 + if (__supported_pte_mask & _PAGE_NX)
17111 + return pte_set_flags(pte, _PAGE_NX);
17112 + else
17113 +#endif
17114 + return pte_clear_flags(pte, _PAGE_USER);
17115 }
17116
17117 static inline pte_t pte_mkdirty(pte_t pte)
17118 @@ -430,6 +495,16 @@ pte_t *populate_extra_pte(unsigned long vaddr);
17119 #endif
17120
17121 #ifndef __ASSEMBLY__
17122 +
17123 +#ifdef CONFIG_PAX_PER_CPU_PGD
17124 +extern pgd_t cpu_pgd[NR_CPUS][2][PTRS_PER_PGD];
17125 +enum cpu_pgd_type {kernel = 0, user = 1};
17126 +static inline pgd_t *get_cpu_pgd(unsigned int cpu, enum cpu_pgd_type type)
17127 +{
17128 + return cpu_pgd[cpu][type];
17129 +}
17130 +#endif
17131 +
17132 #include <linux/mm_types.h>
17133 #include <linux/mmdebug.h>
17134 #include <linux/log2.h>
17135 @@ -563,7 +638,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
17136 * Currently stuck as a macro due to indirect forward reference to
17137 * linux/mmzone.h's __section_mem_map_addr() definition:
17138 */
17139 -#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
17140 +#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
17141
17142 /* Find an entry in the second-level page table.. */
17143 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
17144 @@ -603,7 +678,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
17145 * Currently stuck as a macro due to indirect forward reference to
17146 * linux/mmzone.h's __section_mem_map_addr() definition:
17147 */
17148 -#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
17149 +#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
17150
17151 /* to find an entry in a page-table-directory. */
17152 static inline unsigned long pud_index(unsigned long address)
17153 @@ -618,7 +693,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
17154
17155 static inline int pgd_bad(pgd_t pgd)
17156 {
17157 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
17158 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
17159 }
17160
17161 static inline int pgd_none(pgd_t pgd)
17162 @@ -641,7 +716,12 @@ static inline int pgd_none(pgd_t pgd)
17163 * pgd_offset() returns a (pgd_t *)
17164 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
17165 */
17166 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
17167 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
17168 +
17169 +#ifdef CONFIG_PAX_PER_CPU_PGD
17170 +#define pgd_offset_cpu(cpu, type, address) (get_cpu_pgd(cpu, type) + pgd_index(address))
17171 +#endif
17172 +
17173 /*
17174 * a shortcut which implies the use of the kernel's pgd, instead
17175 * of a process's
17176 @@ -652,6 +732,23 @@ static inline int pgd_none(pgd_t pgd)
17177 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
17178 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
17179
17180 +#ifdef CONFIG_X86_32
17181 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
17182 +#else
17183 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
17184 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
17185 +
17186 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17187 +#ifdef __ASSEMBLY__
17188 +#define pax_user_shadow_base pax_user_shadow_base(%rip)
17189 +#else
17190 +extern unsigned long pax_user_shadow_base;
17191 +extern pgdval_t clone_pgd_mask;
17192 +#endif
17193 +#endif
17194 +
17195 +#endif
17196 +
17197 #ifndef __ASSEMBLY__
17198
17199 extern int direct_gbpages;
17200 @@ -818,11 +915,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
17201 * dst and src can be on the same page, but the range must not overlap,
17202 * and must not cross a page boundary.
17203 */
17204 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
17205 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
17206 {
17207 - memcpy(dst, src, count * sizeof(pgd_t));
17208 + pax_open_kernel();
17209 + while (count--)
17210 + *dst++ = *src++;
17211 + pax_close_kernel();
17212 }
17213
17214 +#ifdef CONFIG_PAX_PER_CPU_PGD
17215 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
17216 +#endif
17217 +
17218 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17219 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
17220 +#else
17221 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
17222 +#endif
17223 +
17224 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
17225 static inline int page_level_shift(enum pg_level level)
17226 {
17227 diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
17228 index 9ee3221..b979c6b 100644
17229 --- a/arch/x86/include/asm/pgtable_32.h
17230 +++ b/arch/x86/include/asm/pgtable_32.h
17231 @@ -25,9 +25,6 @@
17232 struct mm_struct;
17233 struct vm_area_struct;
17234
17235 -extern pgd_t swapper_pg_dir[1024];
17236 -extern pgd_t initial_page_table[1024];
17237 -
17238 static inline void pgtable_cache_init(void) { }
17239 static inline void check_pgt_cache(void) { }
17240 void paging_init(void);
17241 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
17242 # include <asm/pgtable-2level.h>
17243 #endif
17244
17245 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
17246 +extern pgd_t initial_page_table[PTRS_PER_PGD];
17247 +#ifdef CONFIG_X86_PAE
17248 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
17249 +#endif
17250 +
17251 #if defined(CONFIG_HIGHPTE)
17252 #define pte_offset_map(dir, address) \
17253 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
17254 @@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
17255 /* Clear a kernel PTE and flush it from the TLB */
17256 #define kpte_clear_flush(ptep, vaddr) \
17257 do { \
17258 + pax_open_kernel(); \
17259 pte_clear(&init_mm, (vaddr), (ptep)); \
17260 + pax_close_kernel(); \
17261 __flush_tlb_one((vaddr)); \
17262 } while (0)
17263
17264 #endif /* !__ASSEMBLY__ */
17265
17266 +#define HAVE_ARCH_UNMAPPED_AREA
17267 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
17268 +
17269 /*
17270 * kern_addr_valid() is (1) for FLATMEM and (0) for
17271 * SPARSEMEM and DISCONTIGMEM
17272 diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
17273 index ed5903b..c7fe163 100644
17274 --- a/arch/x86/include/asm/pgtable_32_types.h
17275 +++ b/arch/x86/include/asm/pgtable_32_types.h
17276 @@ -8,7 +8,7 @@
17277 */
17278 #ifdef CONFIG_X86_PAE
17279 # include <asm/pgtable-3level_types.h>
17280 -# define PMD_SIZE (1UL << PMD_SHIFT)
17281 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
17282 # define PMD_MASK (~(PMD_SIZE - 1))
17283 #else
17284 # include <asm/pgtable-2level_types.h>
17285 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
17286 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
17287 #endif
17288
17289 +#ifdef CONFIG_PAX_KERNEXEC
17290 +#ifndef __ASSEMBLY__
17291 +extern unsigned char MODULES_EXEC_VADDR[];
17292 +extern unsigned char MODULES_EXEC_END[];
17293 +#endif
17294 +#include <asm/boot.h>
17295 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
17296 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
17297 +#else
17298 +#define ktla_ktva(addr) (addr)
17299 +#define ktva_ktla(addr) (addr)
17300 +#endif
17301 +
17302 #define MODULES_VADDR VMALLOC_START
17303 #define MODULES_END VMALLOC_END
17304 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
17305 diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
17306 index e22c1db..23a625a 100644
17307 --- a/arch/x86/include/asm/pgtable_64.h
17308 +++ b/arch/x86/include/asm/pgtable_64.h
17309 @@ -16,10 +16,14 @@
17310
17311 extern pud_t level3_kernel_pgt[512];
17312 extern pud_t level3_ident_pgt[512];
17313 +extern pud_t level3_vmalloc_start_pgt[512];
17314 +extern pud_t level3_vmalloc_end_pgt[512];
17315 +extern pud_t level3_vmemmap_pgt[512];
17316 +extern pud_t level2_vmemmap_pgt[512];
17317 extern pmd_t level2_kernel_pgt[512];
17318 extern pmd_t level2_fixmap_pgt[512];
17319 -extern pmd_t level2_ident_pgt[512];
17320 -extern pgd_t init_level4_pgt[];
17321 +extern pmd_t level2_ident_pgt[512*2];
17322 +extern pgd_t init_level4_pgt[512];
17323
17324 #define swapper_pg_dir init_level4_pgt
17325
17326 @@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
17327
17328 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
17329 {
17330 + pax_open_kernel();
17331 *pmdp = pmd;
17332 + pax_close_kernel();
17333 }
17334
17335 static inline void native_pmd_clear(pmd_t *pmd)
17336 @@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
17337
17338 static inline void native_set_pud(pud_t *pudp, pud_t pud)
17339 {
17340 + pax_open_kernel();
17341 *pudp = pud;
17342 + pax_close_kernel();
17343 }
17344
17345 static inline void native_pud_clear(pud_t *pud)
17346 @@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
17347
17348 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
17349 {
17350 + pax_open_kernel();
17351 + *pgdp = pgd;
17352 + pax_close_kernel();
17353 +}
17354 +
17355 +static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
17356 +{
17357 *pgdp = pgd;
17358 }
17359
17360 diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
17361 index 2d88344..4679fc3 100644
17362 --- a/arch/x86/include/asm/pgtable_64_types.h
17363 +++ b/arch/x86/include/asm/pgtable_64_types.h
17364 @@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
17365 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
17366 #define MODULES_END _AC(0xffffffffff000000, UL)
17367 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
17368 +#define MODULES_EXEC_VADDR MODULES_VADDR
17369 +#define MODULES_EXEC_END MODULES_END
17370 +
17371 +#define ktla_ktva(addr) (addr)
17372 +#define ktva_ktla(addr) (addr)
17373
17374 #define EARLY_DYNAMIC_PAGE_TABLES 64
17375
17376 diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
17377 index 0ecac25..306c276 100644
17378 --- a/arch/x86/include/asm/pgtable_types.h
17379 +++ b/arch/x86/include/asm/pgtable_types.h
17380 @@ -16,13 +16,12 @@
17381 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
17382 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
17383 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
17384 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
17385 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
17386 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
17387 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
17388 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
17389 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
17390 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
17391 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
17392 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
17393 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
17394 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
17395
17396 /* If _PAGE_BIT_PRESENT is clear, we use these: */
17397 @@ -40,7 +39,6 @@
17398 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
17399 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
17400 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
17401 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
17402 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
17403 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
17404 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
17405 @@ -87,8 +85,10 @@
17406
17407 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
17408 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
17409 -#else
17410 +#elif defined(CONFIG_KMEMCHECK)
17411 #define _PAGE_NX (_AT(pteval_t, 0))
17412 +#else
17413 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
17414 #endif
17415
17416 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
17417 @@ -146,6 +146,9 @@
17418 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
17419 _PAGE_ACCESSED)
17420
17421 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
17422 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
17423 +
17424 #define __PAGE_KERNEL_EXEC \
17425 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
17426 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
17427 @@ -156,7 +159,7 @@
17428 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
17429 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
17430 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
17431 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
17432 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
17433 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
17434 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
17435 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
17436 @@ -218,8 +221,8 @@
17437 * bits are combined, this will alow user to access the high address mapped
17438 * VDSO in the presence of CONFIG_COMPAT_VDSO
17439 */
17440 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
17441 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
17442 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
17443 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
17444 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
17445 #endif
17446
17447 @@ -257,7 +260,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
17448 {
17449 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
17450 }
17451 +#endif
17452
17453 +#if PAGETABLE_LEVELS == 3
17454 +#include <asm-generic/pgtable-nopud.h>
17455 +#endif
17456 +
17457 +#if PAGETABLE_LEVELS == 2
17458 +#include <asm-generic/pgtable-nopmd.h>
17459 +#endif
17460 +
17461 +#ifndef __ASSEMBLY__
17462 #if PAGETABLE_LEVELS > 3
17463 typedef struct { pudval_t pud; } pud_t;
17464
17465 @@ -271,8 +284,6 @@ static inline pudval_t native_pud_val(pud_t pud)
17466 return pud.pud;
17467 }
17468 #else
17469 -#include <asm-generic/pgtable-nopud.h>
17470 -
17471 static inline pudval_t native_pud_val(pud_t pud)
17472 {
17473 return native_pgd_val(pud.pgd);
17474 @@ -292,8 +303,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
17475 return pmd.pmd;
17476 }
17477 #else
17478 -#include <asm-generic/pgtable-nopmd.h>
17479 -
17480 static inline pmdval_t native_pmd_val(pmd_t pmd)
17481 {
17482 return native_pgd_val(pmd.pud.pgd);
17483 @@ -333,7 +342,6 @@ typedef struct page *pgtable_t;
17484
17485 extern pteval_t __supported_pte_mask;
17486 extern void set_nx(void);
17487 -extern int nx_enabled;
17488
17489 #define pgprot_writecombine pgprot_writecombine
17490 extern pgprot_t pgprot_writecombine(pgprot_t prot);
17491 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
17492 index 987c75e..2723054 100644
17493 --- a/arch/x86/include/asm/processor.h
17494 +++ b/arch/x86/include/asm/processor.h
17495 @@ -199,9 +199,21 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
17496 : "memory");
17497 }
17498
17499 +/* invpcid (%rdx),%rax */
17500 +#define __ASM_INVPCID ".byte 0x66,0x0f,0x38,0x82,0x02"
17501 +
17502 +#define INVPCID_SINGLE_ADDRESS 0UL
17503 +#define INVPCID_SINGLE_CONTEXT 1UL
17504 +#define INVPCID_ALL_GLOBAL 2UL
17505 +#define INVPCID_ALL_MONGLOBAL 3UL
17506 +
17507 +#define PCID_KERNEL 0UL
17508 +#define PCID_USER 1UL
17509 +#define PCID_NOFLUSH (1UL << 63)
17510 +
17511 static inline void load_cr3(pgd_t *pgdir)
17512 {
17513 - write_cr3(__pa(pgdir));
17514 + write_cr3(__pa(pgdir) | PCID_KERNEL);
17515 }
17516
17517 #ifdef CONFIG_X86_32
17518 @@ -283,7 +295,7 @@ struct tss_struct {
17519
17520 } ____cacheline_aligned;
17521
17522 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
17523 +extern struct tss_struct init_tss[NR_CPUS];
17524
17525 /*
17526 * Save the original ist values for checking stack pointers during debugging
17527 @@ -453,6 +465,7 @@ struct thread_struct {
17528 unsigned short ds;
17529 unsigned short fsindex;
17530 unsigned short gsindex;
17531 + unsigned short ss;
17532 #endif
17533 #ifdef CONFIG_X86_32
17534 unsigned long ip;
17535 @@ -553,29 +566,8 @@ static inline void load_sp0(struct tss_struct *tss,
17536 extern unsigned long mmu_cr4_features;
17537 extern u32 *trampoline_cr4_features;
17538
17539 -static inline void set_in_cr4(unsigned long mask)
17540 -{
17541 - unsigned long cr4;
17542 -
17543 - mmu_cr4_features |= mask;
17544 - if (trampoline_cr4_features)
17545 - *trampoline_cr4_features = mmu_cr4_features;
17546 - cr4 = read_cr4();
17547 - cr4 |= mask;
17548 - write_cr4(cr4);
17549 -}
17550 -
17551 -static inline void clear_in_cr4(unsigned long mask)
17552 -{
17553 - unsigned long cr4;
17554 -
17555 - mmu_cr4_features &= ~mask;
17556 - if (trampoline_cr4_features)
17557 - *trampoline_cr4_features = mmu_cr4_features;
17558 - cr4 = read_cr4();
17559 - cr4 &= ~mask;
17560 - write_cr4(cr4);
17561 -}
17562 +extern void set_in_cr4(unsigned long mask);
17563 +extern void clear_in_cr4(unsigned long mask);
17564
17565 typedef struct {
17566 unsigned long seg;
17567 @@ -824,11 +816,18 @@ static inline void spin_lock_prefetch(const void *x)
17568 */
17569 #define TASK_SIZE PAGE_OFFSET
17570 #define TASK_SIZE_MAX TASK_SIZE
17571 +
17572 +#ifdef CONFIG_PAX_SEGMEXEC
17573 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
17574 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
17575 +#else
17576 #define STACK_TOP TASK_SIZE
17577 -#define STACK_TOP_MAX STACK_TOP
17578 +#endif
17579 +
17580 +#define STACK_TOP_MAX TASK_SIZE
17581
17582 #define INIT_THREAD { \
17583 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
17584 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
17585 .vm86_info = NULL, \
17586 .sysenter_cs = __KERNEL_CS, \
17587 .io_bitmap_ptr = NULL, \
17588 @@ -842,7 +841,7 @@ static inline void spin_lock_prefetch(const void *x)
17589 */
17590 #define INIT_TSS { \
17591 .x86_tss = { \
17592 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
17593 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
17594 .ss0 = __KERNEL_DS, \
17595 .ss1 = __KERNEL_CS, \
17596 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
17597 @@ -853,11 +852,7 @@ static inline void spin_lock_prefetch(const void *x)
17598 extern unsigned long thread_saved_pc(struct task_struct *tsk);
17599
17600 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
17601 -#define KSTK_TOP(info) \
17602 -({ \
17603 - unsigned long *__ptr = (unsigned long *)(info); \
17604 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
17605 -})
17606 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
17607
17608 /*
17609 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
17610 @@ -872,7 +867,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
17611 #define task_pt_regs(task) \
17612 ({ \
17613 struct pt_regs *__regs__; \
17614 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
17615 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
17616 __regs__ - 1; \
17617 })
17618
17619 @@ -882,13 +877,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
17620 /*
17621 * User space process size. 47bits minus one guard page.
17622 */
17623 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
17624 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
17625
17626 /* This decides where the kernel will search for a free chunk of vm
17627 * space during mmap's.
17628 */
17629 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
17630 - 0xc0000000 : 0xFFFFe000)
17631 + 0xc0000000 : 0xFFFFf000)
17632
17633 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
17634 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
17635 @@ -899,11 +894,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
17636 #define STACK_TOP_MAX TASK_SIZE_MAX
17637
17638 #define INIT_THREAD { \
17639 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
17640 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
17641 }
17642
17643 #define INIT_TSS { \
17644 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
17645 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
17646 }
17647
17648 /*
17649 @@ -931,6 +926,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
17650 */
17651 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
17652
17653 +#ifdef CONFIG_PAX_SEGMEXEC
17654 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
17655 +#endif
17656 +
17657 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
17658
17659 /* Get/set a process' ability to use the timestamp counter instruction */
17660 @@ -957,7 +956,7 @@ static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
17661 return 0;
17662 }
17663
17664 -extern unsigned long arch_align_stack(unsigned long sp);
17665 +#define arch_align_stack(x) ((x) & ~0xfUL)
17666 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
17667
17668 void default_idle(void);
17669 @@ -967,6 +966,6 @@ bool xen_set_default_idle(void);
17670 #define xen_set_default_idle 0
17671 #endif
17672
17673 -void stop_this_cpu(void *dummy);
17674 +void stop_this_cpu(void *dummy) __noreturn;
17675 void df_debug(struct pt_regs *regs, long error_code);
17676 #endif /* _ASM_X86_PROCESSOR_H */
17677 diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
17678 index 942a086..6c26446 100644
17679 --- a/arch/x86/include/asm/ptrace.h
17680 +++ b/arch/x86/include/asm/ptrace.h
17681 @@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
17682 }
17683
17684 /*
17685 - * user_mode_vm(regs) determines whether a register set came from user mode.
17686 + * user_mode(regs) determines whether a register set came from user mode.
17687 * This is true if V8086 mode was enabled OR if the register set was from
17688 * protected mode with RPL-3 CS value. This tricky test checks that with
17689 * one comparison. Many places in the kernel can bypass this full check
17690 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
17691 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
17692 + * be used.
17693 */
17694 -static inline int user_mode(struct pt_regs *regs)
17695 +static inline int user_mode_novm(struct pt_regs *regs)
17696 {
17697 #ifdef CONFIG_X86_32
17698 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
17699 #else
17700 - return !!(regs->cs & 3);
17701 + return !!(regs->cs & SEGMENT_RPL_MASK);
17702 #endif
17703 }
17704
17705 -static inline int user_mode_vm(struct pt_regs *regs)
17706 +static inline int user_mode(struct pt_regs *regs)
17707 {
17708 #ifdef CONFIG_X86_32
17709 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
17710 USER_RPL;
17711 #else
17712 - return user_mode(regs);
17713 + return user_mode_novm(regs);
17714 #endif
17715 }
17716
17717 @@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
17718 #ifdef CONFIG_X86_64
17719 static inline bool user_64bit_mode(struct pt_regs *regs)
17720 {
17721 + unsigned long cs = regs->cs & 0xffff;
17722 #ifndef CONFIG_PARAVIRT
17723 /*
17724 * On non-paravirt systems, this is the only long mode CPL 3
17725 * selector. We do not allow long mode selectors in the LDT.
17726 */
17727 - return regs->cs == __USER_CS;
17728 + return cs == __USER_CS;
17729 #else
17730 /* Headers are too twisted for this to go in paravirt.h. */
17731 - return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
17732 + return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
17733 #endif
17734 }
17735
17736 @@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
17737 * Traps from the kernel do not save sp and ss.
17738 * Use the helper function to retrieve sp.
17739 */
17740 - if (offset == offsetof(struct pt_regs, sp) &&
17741 - regs->cs == __KERNEL_CS)
17742 - return kernel_stack_pointer(regs);
17743 + if (offset == offsetof(struct pt_regs, sp)) {
17744 + unsigned long cs = regs->cs & 0xffff;
17745 + if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
17746 + return kernel_stack_pointer(regs);
17747 + }
17748 #endif
17749 return *(unsigned long *)((unsigned long)regs + offset);
17750 }
17751 diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
17752 index 9c6b890..5305f53 100644
17753 --- a/arch/x86/include/asm/realmode.h
17754 +++ b/arch/x86/include/asm/realmode.h
17755 @@ -22,16 +22,14 @@ struct real_mode_header {
17756 #endif
17757 /* APM/BIOS reboot */
17758 u32 machine_real_restart_asm;
17759 -#ifdef CONFIG_X86_64
17760 u32 machine_real_restart_seg;
17761 -#endif
17762 };
17763
17764 /* This must match data at trampoline_32/64.S */
17765 struct trampoline_header {
17766 #ifdef CONFIG_X86_32
17767 u32 start;
17768 - u16 gdt_pad;
17769 + u16 boot_cs;
17770 u16 gdt_limit;
17771 u32 gdt_base;
17772 #else
17773 diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
17774 index a82c4f1..ac45053 100644
17775 --- a/arch/x86/include/asm/reboot.h
17776 +++ b/arch/x86/include/asm/reboot.h
17777 @@ -6,13 +6,13 @@
17778 struct pt_regs;
17779
17780 struct machine_ops {
17781 - void (*restart)(char *cmd);
17782 - void (*halt)(void);
17783 - void (*power_off)(void);
17784 + void (* __noreturn restart)(char *cmd);
17785 + void (* __noreturn halt)(void);
17786 + void (* __noreturn power_off)(void);
17787 void (*shutdown)(void);
17788 void (*crash_shutdown)(struct pt_regs *);
17789 - void (*emergency_restart)(void);
17790 -};
17791 + void (* __noreturn emergency_restart)(void);
17792 +} __no_const;
17793
17794 extern struct machine_ops machine_ops;
17795
17796 diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
17797 index cad82c9..2e5c5c1 100644
17798 --- a/arch/x86/include/asm/rwsem.h
17799 +++ b/arch/x86/include/asm/rwsem.h
17800 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
17801 {
17802 asm volatile("# beginning down_read\n\t"
17803 LOCK_PREFIX _ASM_INC "(%1)\n\t"
17804 +
17805 +#ifdef CONFIG_PAX_REFCOUNT
17806 + "jno 0f\n"
17807 + LOCK_PREFIX _ASM_DEC "(%1)\n"
17808 + "int $4\n0:\n"
17809 + _ASM_EXTABLE(0b, 0b)
17810 +#endif
17811 +
17812 /* adds 0x00000001 */
17813 " jns 1f\n"
17814 " call call_rwsem_down_read_failed\n"
17815 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
17816 "1:\n\t"
17817 " mov %1,%2\n\t"
17818 " add %3,%2\n\t"
17819 +
17820 +#ifdef CONFIG_PAX_REFCOUNT
17821 + "jno 0f\n"
17822 + "sub %3,%2\n"
17823 + "int $4\n0:\n"
17824 + _ASM_EXTABLE(0b, 0b)
17825 +#endif
17826 +
17827 " jle 2f\n\t"
17828 LOCK_PREFIX " cmpxchg %2,%0\n\t"
17829 " jnz 1b\n\t"
17830 @@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
17831 long tmp;
17832 asm volatile("# beginning down_write\n\t"
17833 LOCK_PREFIX " xadd %1,(%2)\n\t"
17834 +
17835 +#ifdef CONFIG_PAX_REFCOUNT
17836 + "jno 0f\n"
17837 + "mov %1,(%2)\n"
17838 + "int $4\n0:\n"
17839 + _ASM_EXTABLE(0b, 0b)
17840 +#endif
17841 +
17842 /* adds 0xffff0001, returns the old value */
17843 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
17844 /* was the active mask 0 before? */
17845 @@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
17846 long tmp;
17847 asm volatile("# beginning __up_read\n\t"
17848 LOCK_PREFIX " xadd %1,(%2)\n\t"
17849 +
17850 +#ifdef CONFIG_PAX_REFCOUNT
17851 + "jno 0f\n"
17852 + "mov %1,(%2)\n"
17853 + "int $4\n0:\n"
17854 + _ASM_EXTABLE(0b, 0b)
17855 +#endif
17856 +
17857 /* subtracts 1, returns the old value */
17858 " jns 1f\n\t"
17859 " call call_rwsem_wake\n" /* expects old value in %edx */
17860 @@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
17861 long tmp;
17862 asm volatile("# beginning __up_write\n\t"
17863 LOCK_PREFIX " xadd %1,(%2)\n\t"
17864 +
17865 +#ifdef CONFIG_PAX_REFCOUNT
17866 + "jno 0f\n"
17867 + "mov %1,(%2)\n"
17868 + "int $4\n0:\n"
17869 + _ASM_EXTABLE(0b, 0b)
17870 +#endif
17871 +
17872 /* subtracts 0xffff0001, returns the old value */
17873 " jns 1f\n\t"
17874 " call call_rwsem_wake\n" /* expects old value in %edx */
17875 @@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
17876 {
17877 asm volatile("# beginning __downgrade_write\n\t"
17878 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
17879 +
17880 +#ifdef CONFIG_PAX_REFCOUNT
17881 + "jno 0f\n"
17882 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
17883 + "int $4\n0:\n"
17884 + _ASM_EXTABLE(0b, 0b)
17885 +#endif
17886 +
17887 /*
17888 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
17889 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
17890 @@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
17891 */
17892 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
17893 {
17894 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
17895 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
17896 +
17897 +#ifdef CONFIG_PAX_REFCOUNT
17898 + "jno 0f\n"
17899 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
17900 + "int $4\n0:\n"
17901 + _ASM_EXTABLE(0b, 0b)
17902 +#endif
17903 +
17904 : "+m" (sem->count)
17905 : "er" (delta));
17906 }
17907 @@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
17908 */
17909 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
17910 {
17911 - return delta + xadd(&sem->count, delta);
17912 + return delta + xadd_check_overflow(&sem->count, delta);
17913 }
17914
17915 #endif /* __KERNEL__ */
17916 diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
17917 index c48a950..bc40804 100644
17918 --- a/arch/x86/include/asm/segment.h
17919 +++ b/arch/x86/include/asm/segment.h
17920 @@ -64,10 +64,15 @@
17921 * 26 - ESPFIX small SS
17922 * 27 - per-cpu [ offset to per-cpu data area ]
17923 * 28 - stack_canary-20 [ for stack protector ]
17924 - * 29 - unused
17925 - * 30 - unused
17926 + * 29 - PCI BIOS CS
17927 + * 30 - PCI BIOS DS
17928 * 31 - TSS for double fault handler
17929 */
17930 +#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
17931 +#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
17932 +#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
17933 +#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
17934 +
17935 #define GDT_ENTRY_TLS_MIN 6
17936 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
17937
17938 @@ -79,6 +84,8 @@
17939
17940 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
17941
17942 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
17943 +
17944 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
17945
17946 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
17947 @@ -104,6 +111,12 @@
17948 #define __KERNEL_STACK_CANARY 0
17949 #endif
17950
17951 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
17952 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
17953 +
17954 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
17955 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
17956 +
17957 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
17958
17959 /*
17960 @@ -141,7 +154,7 @@
17961 */
17962
17963 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
17964 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
17965 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
17966
17967
17968 #else
17969 @@ -165,6 +178,8 @@
17970 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
17971 #define __USER32_DS __USER_DS
17972
17973 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
17974 +
17975 #define GDT_ENTRY_TSS 8 /* needs two entries */
17976 #define GDT_ENTRY_LDT 10 /* needs two entries */
17977 #define GDT_ENTRY_TLS_MIN 12
17978 @@ -173,6 +188,8 @@
17979 #define GDT_ENTRY_PER_CPU 15 /* Abused to load per CPU data from limit */
17980 #define __PER_CPU_SEG (GDT_ENTRY_PER_CPU * 8 + 3)
17981
17982 +#define GDT_ENTRY_UDEREF_KERNEL_DS 16
17983 +
17984 /* TLS indexes for 64bit - hardcoded in arch_prctl */
17985 #define FS_TLS 0
17986 #define GS_TLS 1
17987 @@ -180,12 +197,14 @@
17988 #define GS_TLS_SEL ((GDT_ENTRY_TLS_MIN+GS_TLS)*8 + 3)
17989 #define FS_TLS_SEL ((GDT_ENTRY_TLS_MIN+FS_TLS)*8 + 3)
17990
17991 -#define GDT_ENTRIES 16
17992 +#define GDT_ENTRIES 17
17993
17994 #endif
17995
17996 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
17997 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
17998 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
17999 +#define __UDEREF_KERNEL_DS (GDT_ENTRY_UDEREF_KERNEL_DS*8)
18000 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
18001 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
18002 #ifndef CONFIG_PARAVIRT
18003 @@ -265,7 +284,7 @@ static inline unsigned long get_limit(unsigned long segment)
18004 {
18005 unsigned long __limit;
18006 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
18007 - return __limit + 1;
18008 + return __limit;
18009 }
18010
18011 #endif /* !__ASSEMBLY__ */
18012 diff --git a/arch/x86/include/asm/smap.h b/arch/x86/include/asm/smap.h
18013 index 8d3120f..352b440 100644
18014 --- a/arch/x86/include/asm/smap.h
18015 +++ b/arch/x86/include/asm/smap.h
18016 @@ -25,11 +25,40 @@
18017
18018 #include <asm/alternative-asm.h>
18019
18020 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18021 +#define ASM_PAX_OPEN_USERLAND \
18022 + 661: jmp 663f; \
18023 + .pushsection .altinstr_replacement, "a" ; \
18024 + 662: pushq %rax; nop; \
18025 + .popsection ; \
18026 + .pushsection .altinstructions, "a" ; \
18027 + altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
18028 + .popsection ; \
18029 + call __pax_open_userland; \
18030 + popq %rax; \
18031 + 663:
18032 +
18033 +#define ASM_PAX_CLOSE_USERLAND \
18034 + 661: jmp 663f; \
18035 + .pushsection .altinstr_replacement, "a" ; \
18036 + 662: pushq %rax; nop; \
18037 + .popsection; \
18038 + .pushsection .altinstructions, "a" ; \
18039 + altinstruction_entry 661b, 662b, X86_FEATURE_STRONGUDEREF, 2, 2;\
18040 + .popsection; \
18041 + call __pax_close_userland; \
18042 + popq %rax; \
18043 + 663:
18044 +#else
18045 +#define ASM_PAX_OPEN_USERLAND
18046 +#define ASM_PAX_CLOSE_USERLAND
18047 +#endif
18048 +
18049 #ifdef CONFIG_X86_SMAP
18050
18051 #define ASM_CLAC \
18052 661: ASM_NOP3 ; \
18053 - .pushsection .altinstr_replacement, "ax" ; \
18054 + .pushsection .altinstr_replacement, "a" ; \
18055 662: __ASM_CLAC ; \
18056 .popsection ; \
18057 .pushsection .altinstructions, "a" ; \
18058 @@ -38,7 +67,7 @@
18059
18060 #define ASM_STAC \
18061 661: ASM_NOP3 ; \
18062 - .pushsection .altinstr_replacement, "ax" ; \
18063 + .pushsection .altinstr_replacement, "a" ; \
18064 662: __ASM_STAC ; \
18065 .popsection ; \
18066 .pushsection .altinstructions, "a" ; \
18067 @@ -56,6 +85,37 @@
18068
18069 #include <asm/alternative.h>
18070
18071 +#define __HAVE_ARCH_PAX_OPEN_USERLAND
18072 +#define __HAVE_ARCH_PAX_CLOSE_USERLAND
18073 +
18074 +extern void __pax_open_userland(void);
18075 +static __always_inline unsigned long pax_open_userland(void)
18076 +{
18077 +
18078 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18079 + asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[open]", X86_FEATURE_STRONGUDEREF)
18080 + :
18081 + : [open] "i" (__pax_open_userland)
18082 + : "memory", "rax");
18083 +#endif
18084 +
18085 + return 0;
18086 +}
18087 +
18088 +extern void __pax_close_userland(void);
18089 +static __always_inline unsigned long pax_close_userland(void)
18090 +{
18091 +
18092 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18093 + asm volatile(ALTERNATIVE(ASM_NOP5, "call %P[close]", X86_FEATURE_STRONGUDEREF)
18094 + :
18095 + : [close] "i" (__pax_close_userland)
18096 + : "memory", "rax");
18097 +#endif
18098 +
18099 + return 0;
18100 +}
18101 +
18102 #ifdef CONFIG_X86_SMAP
18103
18104 static __always_inline void clac(void)
18105 diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
18106 index 4137890..03fa172 100644
18107 --- a/arch/x86/include/asm/smp.h
18108 +++ b/arch/x86/include/asm/smp.h
18109 @@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
18110 /* cpus sharing the last level cache: */
18111 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
18112 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
18113 -DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
18114 +DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
18115
18116 static inline struct cpumask *cpu_sibling_mask(int cpu)
18117 {
18118 @@ -79,7 +79,7 @@ struct smp_ops {
18119
18120 void (*send_call_func_ipi)(const struct cpumask *mask);
18121 void (*send_call_func_single_ipi)(int cpu);
18122 -};
18123 +} __no_const;
18124
18125 /* Globals due to paravirt */
18126 extern void set_cpu_sibling_map(int cpu);
18127 @@ -191,14 +191,8 @@ extern unsigned disabled_cpus;
18128 extern int safe_smp_processor_id(void);
18129
18130 #elif defined(CONFIG_X86_64_SMP)
18131 -#define raw_smp_processor_id() (this_cpu_read(cpu_number))
18132 -
18133 -#define stack_smp_processor_id() \
18134 -({ \
18135 - struct thread_info *ti; \
18136 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
18137 - ti->cpu; \
18138 -})
18139 +#define raw_smp_processor_id() (this_cpu_read(cpu_number))
18140 +#define stack_smp_processor_id() raw_smp_processor_id()
18141 #define safe_smp_processor_id() smp_processor_id()
18142
18143 #endif
18144 diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
18145 index bf156de..1a782ab 100644
18146 --- a/arch/x86/include/asm/spinlock.h
18147 +++ b/arch/x86/include/asm/spinlock.h
18148 @@ -223,6 +223,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
18149 static inline void arch_read_lock(arch_rwlock_t *rw)
18150 {
18151 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
18152 +
18153 +#ifdef CONFIG_PAX_REFCOUNT
18154 + "jno 0f\n"
18155 + LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
18156 + "int $4\n0:\n"
18157 + _ASM_EXTABLE(0b, 0b)
18158 +#endif
18159 +
18160 "jns 1f\n"
18161 "call __read_lock_failed\n\t"
18162 "1:\n"
18163 @@ -232,6 +240,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
18164 static inline void arch_write_lock(arch_rwlock_t *rw)
18165 {
18166 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
18167 +
18168 +#ifdef CONFIG_PAX_REFCOUNT
18169 + "jno 0f\n"
18170 + LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
18171 + "int $4\n0:\n"
18172 + _ASM_EXTABLE(0b, 0b)
18173 +#endif
18174 +
18175 "jz 1f\n"
18176 "call __write_lock_failed\n\t"
18177 "1:\n"
18178 @@ -261,13 +277,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
18179
18180 static inline void arch_read_unlock(arch_rwlock_t *rw)
18181 {
18182 - asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
18183 + asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
18184 +
18185 +#ifdef CONFIG_PAX_REFCOUNT
18186 + "jno 0f\n"
18187 + LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
18188 + "int $4\n0:\n"
18189 + _ASM_EXTABLE(0b, 0b)
18190 +#endif
18191 +
18192 :"+m" (rw->lock) : : "memory");
18193 }
18194
18195 static inline void arch_write_unlock(arch_rwlock_t *rw)
18196 {
18197 - asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
18198 + asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
18199 +
18200 +#ifdef CONFIG_PAX_REFCOUNT
18201 + "jno 0f\n"
18202 + LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
18203 + "int $4\n0:\n"
18204 + _ASM_EXTABLE(0b, 0b)
18205 +#endif
18206 +
18207 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
18208 }
18209
18210 diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
18211 index 6a99859..03cb807 100644
18212 --- a/arch/x86/include/asm/stackprotector.h
18213 +++ b/arch/x86/include/asm/stackprotector.h
18214 @@ -47,7 +47,7 @@
18215 * head_32 for boot CPU and setup_per_cpu_areas() for others.
18216 */
18217 #define GDT_STACK_CANARY_INIT \
18218 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
18219 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
18220
18221 /*
18222 * Initialize the stackprotector canary value.
18223 @@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
18224
18225 static inline void load_stack_canary_segment(void)
18226 {
18227 -#ifdef CONFIG_X86_32
18228 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18229 asm volatile ("mov %0, %%gs" : : "r" (0));
18230 #endif
18231 }
18232 diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
18233 index 70bbe39..4ae2bd4 100644
18234 --- a/arch/x86/include/asm/stacktrace.h
18235 +++ b/arch/x86/include/asm/stacktrace.h
18236 @@ -11,28 +11,20 @@
18237
18238 extern int kstack_depth_to_print;
18239
18240 -struct thread_info;
18241 +struct task_struct;
18242 struct stacktrace_ops;
18243
18244 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
18245 - unsigned long *stack,
18246 - unsigned long bp,
18247 - const struct stacktrace_ops *ops,
18248 - void *data,
18249 - unsigned long *end,
18250 - int *graph);
18251 +typedef unsigned long walk_stack_t(struct task_struct *task,
18252 + void *stack_start,
18253 + unsigned long *stack,
18254 + unsigned long bp,
18255 + const struct stacktrace_ops *ops,
18256 + void *data,
18257 + unsigned long *end,
18258 + int *graph);
18259
18260 -extern unsigned long
18261 -print_context_stack(struct thread_info *tinfo,
18262 - unsigned long *stack, unsigned long bp,
18263 - const struct stacktrace_ops *ops, void *data,
18264 - unsigned long *end, int *graph);
18265 -
18266 -extern unsigned long
18267 -print_context_stack_bp(struct thread_info *tinfo,
18268 - unsigned long *stack, unsigned long bp,
18269 - const struct stacktrace_ops *ops, void *data,
18270 - unsigned long *end, int *graph);
18271 +extern walk_stack_t print_context_stack;
18272 +extern walk_stack_t print_context_stack_bp;
18273
18274 /* Generic stack tracer with callbacks */
18275
18276 @@ -40,7 +32,7 @@ struct stacktrace_ops {
18277 void (*address)(void *data, unsigned long address, int reliable);
18278 /* On negative return stop dumping */
18279 int (*stack)(void *data, char *name);
18280 - walk_stack_t walk_stack;
18281 + walk_stack_t *walk_stack;
18282 };
18283
18284 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
18285 diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
18286 index d7f3b3b..3cc39f1 100644
18287 --- a/arch/x86/include/asm/switch_to.h
18288 +++ b/arch/x86/include/asm/switch_to.h
18289 @@ -108,7 +108,7 @@ do { \
18290 "call __switch_to\n\t" \
18291 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
18292 __switch_canary \
18293 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
18294 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
18295 "movq %%rax,%%rdi\n\t" \
18296 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
18297 "jnz ret_from_fork\n\t" \
18298 @@ -119,7 +119,7 @@ do { \
18299 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
18300 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
18301 [_tif_fork] "i" (_TIF_FORK), \
18302 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
18303 + [thread_info] "m" (current_tinfo), \
18304 [current_task] "m" (current_task) \
18305 __switch_canary_iparam \
18306 : "memory", "cc" __EXTRA_CLOBBER)
18307 diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
18308 index 2781119..618b59b 100644
18309 --- a/arch/x86/include/asm/thread_info.h
18310 +++ b/arch/x86/include/asm/thread_info.h
18311 @@ -10,6 +10,7 @@
18312 #include <linux/compiler.h>
18313 #include <asm/page.h>
18314 #include <asm/types.h>
18315 +#include <asm/percpu.h>
18316
18317 /*
18318 * low level task data that entry.S needs immediate access to
18319 @@ -23,7 +24,6 @@ struct exec_domain;
18320 #include <linux/atomic.h>
18321
18322 struct thread_info {
18323 - struct task_struct *task; /* main task structure */
18324 struct exec_domain *exec_domain; /* execution domain */
18325 __u32 flags; /* low level flags */
18326 __u32 status; /* thread synchronous flags */
18327 @@ -33,19 +33,13 @@ struct thread_info {
18328 mm_segment_t addr_limit;
18329 struct restart_block restart_block;
18330 void __user *sysenter_return;
18331 -#ifdef CONFIG_X86_32
18332 - unsigned long previous_esp; /* ESP of the previous stack in
18333 - case of nested (IRQ) stacks
18334 - */
18335 - __u8 supervisor_stack[0];
18336 -#endif
18337 + unsigned long lowest_stack;
18338 unsigned int sig_on_uaccess_error:1;
18339 unsigned int uaccess_err:1; /* uaccess failed */
18340 };
18341
18342 -#define INIT_THREAD_INFO(tsk) \
18343 +#define INIT_THREAD_INFO \
18344 { \
18345 - .task = &tsk, \
18346 .exec_domain = &default_exec_domain, \
18347 .flags = 0, \
18348 .cpu = 0, \
18349 @@ -56,7 +50,7 @@ struct thread_info {
18350 }, \
18351 }
18352
18353 -#define init_thread_info (init_thread_union.thread_info)
18354 +#define init_thread_info (init_thread_union.stack)
18355 #define init_stack (init_thread_union.stack)
18356
18357 #else /* !__ASSEMBLY__ */
18358 @@ -96,6 +90,7 @@ struct thread_info {
18359 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
18360 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
18361 #define TIF_X32 30 /* 32-bit native x86-64 binary */
18362 +#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
18363
18364 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
18365 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
18366 @@ -119,17 +114,18 @@ struct thread_info {
18367 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
18368 #define _TIF_ADDR32 (1 << TIF_ADDR32)
18369 #define _TIF_X32 (1 << TIF_X32)
18370 +#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
18371
18372 /* work to do in syscall_trace_enter() */
18373 #define _TIF_WORK_SYSCALL_ENTRY \
18374 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
18375 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
18376 - _TIF_NOHZ)
18377 + _TIF_NOHZ | _TIF_GRSEC_SETXID)
18378
18379 /* work to do in syscall_trace_leave() */
18380 #define _TIF_WORK_SYSCALL_EXIT \
18381 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
18382 - _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
18383 + _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
18384
18385 /* work to do on interrupt/exception return */
18386 #define _TIF_WORK_MASK \
18387 @@ -140,7 +136,7 @@ struct thread_info {
18388 /* work to do on any return to user space */
18389 #define _TIF_ALLWORK_MASK \
18390 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
18391 - _TIF_NOHZ)
18392 + _TIF_NOHZ | _TIF_GRSEC_SETXID)
18393
18394 /* Only used for 64 bit */
18395 #define _TIF_DO_NOTIFY_MASK \
18396 @@ -156,45 +152,40 @@ struct thread_info {
18397
18398 #define PREEMPT_ACTIVE 0x10000000
18399
18400 -#ifdef CONFIG_X86_32
18401 -
18402 -#define STACK_WARN (THREAD_SIZE/8)
18403 -/*
18404 - * macros/functions for gaining access to the thread information structure
18405 - *
18406 - * preempt_count needs to be 1 initially, until the scheduler is functional.
18407 - */
18408 -#ifndef __ASSEMBLY__
18409 -
18410 -
18411 -/* how to get the current stack pointer from C */
18412 -register unsigned long current_stack_pointer asm("esp") __used;
18413 -
18414 -/* how to get the thread information struct from C */
18415 -static inline struct thread_info *current_thread_info(void)
18416 -{
18417 - return (struct thread_info *)
18418 - (current_stack_pointer & ~(THREAD_SIZE - 1));
18419 -}
18420 -
18421 -#else /* !__ASSEMBLY__ */
18422 -
18423 +#ifdef __ASSEMBLY__
18424 /* how to get the thread information struct from ASM */
18425 #define GET_THREAD_INFO(reg) \
18426 - movl $-THREAD_SIZE, reg; \
18427 - andl %esp, reg
18428 + mov PER_CPU_VAR(current_tinfo), reg
18429
18430 /* use this one if reg already contains %esp */
18431 -#define GET_THREAD_INFO_WITH_ESP(reg) \
18432 - andl $-THREAD_SIZE, reg
18433 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
18434 +#else
18435 +/* how to get the thread information struct from C */
18436 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
18437 +
18438 +static __always_inline struct thread_info *current_thread_info(void)
18439 +{
18440 + return this_cpu_read_stable(current_tinfo);
18441 +}
18442 +#endif
18443 +
18444 +#ifdef CONFIG_X86_32
18445 +
18446 +#define STACK_WARN (THREAD_SIZE/8)
18447 +/*
18448 + * macros/functions for gaining access to the thread information structure
18449 + *
18450 + * preempt_count needs to be 1 initially, until the scheduler is functional.
18451 + */
18452 +#ifndef __ASSEMBLY__
18453 +
18454 +/* how to get the current stack pointer from C */
18455 +register unsigned long current_stack_pointer asm("esp") __used;
18456
18457 #endif
18458
18459 #else /* X86_32 */
18460
18461 -#include <asm/percpu.h>
18462 -#define KERNEL_STACK_OFFSET (5*8)
18463 -
18464 /*
18465 * macros/functions for gaining access to the thread information structure
18466 * preempt_count needs to be 1 initially, until the scheduler is functional.
18467 @@ -202,27 +193,8 @@ static inline struct thread_info *current_thread_info(void)
18468 #ifndef __ASSEMBLY__
18469 DECLARE_PER_CPU(unsigned long, kernel_stack);
18470
18471 -static inline struct thread_info *current_thread_info(void)
18472 -{
18473 - struct thread_info *ti;
18474 - ti = (void *)(this_cpu_read_stable(kernel_stack) +
18475 - KERNEL_STACK_OFFSET - THREAD_SIZE);
18476 - return ti;
18477 -}
18478 -
18479 -#else /* !__ASSEMBLY__ */
18480 -
18481 -/* how to get the thread information struct from ASM */
18482 -#define GET_THREAD_INFO(reg) \
18483 - movq PER_CPU_VAR(kernel_stack),reg ; \
18484 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
18485 -
18486 -/*
18487 - * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
18488 - * a certain register (to be used in assembler memory operands).
18489 - */
18490 -#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
18491 -
18492 +/* how to get the current stack pointer from C */
18493 +register unsigned long current_stack_pointer asm("rsp") __used;
18494 #endif
18495
18496 #endif /* !X86_32 */
18497 @@ -281,5 +253,12 @@ static inline bool is_ia32_task(void)
18498 extern void arch_task_cache_init(void);
18499 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
18500 extern void arch_release_task_struct(struct task_struct *tsk);
18501 +
18502 +#define __HAVE_THREAD_FUNCTIONS
18503 +#define task_thread_info(task) (&(task)->tinfo)
18504 +#define task_stack_page(task) ((task)->stack)
18505 +#define setup_thread_stack(p, org) do {} while (0)
18506 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
18507 +
18508 #endif
18509 #endif /* _ASM_X86_THREAD_INFO_H */
18510 diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
18511 index e6d90ba..0897f44 100644
18512 --- a/arch/x86/include/asm/tlbflush.h
18513 +++ b/arch/x86/include/asm/tlbflush.h
18514 @@ -17,18 +17,44 @@
18515
18516 static inline void __native_flush_tlb(void)
18517 {
18518 + if (static_cpu_has(X86_FEATURE_INVPCID)) {
18519 + u64 descriptor[2];
18520 +
18521 + descriptor[0] = PCID_KERNEL;
18522 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_MONGLOBAL) : "memory");
18523 + return;
18524 + }
18525 +
18526 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18527 + if (static_cpu_has(X86_FEATURE_PCID)) {
18528 + unsigned int cpu = raw_get_cpu();
18529 +
18530 + native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER);
18531 + native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL);
18532 + raw_put_cpu_no_resched();
18533 + return;
18534 + }
18535 +#endif
18536 +
18537 native_write_cr3(native_read_cr3());
18538 }
18539
18540 static inline void __native_flush_tlb_global_irq_disabled(void)
18541 {
18542 - unsigned long cr4;
18543 + if (static_cpu_has(X86_FEATURE_INVPCID)) {
18544 + u64 descriptor[2];
18545
18546 - cr4 = native_read_cr4();
18547 - /* clear PGE */
18548 - native_write_cr4(cr4 & ~X86_CR4_PGE);
18549 - /* write old PGE again and flush TLBs */
18550 - native_write_cr4(cr4);
18551 + descriptor[0] = PCID_KERNEL;
18552 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_ALL_GLOBAL) : "memory");
18553 + } else {
18554 + unsigned long cr4;
18555 +
18556 + cr4 = native_read_cr4();
18557 + /* clear PGE */
18558 + native_write_cr4(cr4 & ~X86_CR4_PGE);
18559 + /* write old PGE again and flush TLBs */
18560 + native_write_cr4(cr4);
18561 + }
18562 }
18563
18564 static inline void __native_flush_tlb_global(void)
18565 @@ -49,6 +75,42 @@ static inline void __native_flush_tlb_global(void)
18566
18567 static inline void __native_flush_tlb_single(unsigned long addr)
18568 {
18569 +
18570 + if (static_cpu_has(X86_FEATURE_INVPCID)) {
18571 + u64 descriptor[2];
18572 +
18573 + descriptor[0] = PCID_KERNEL;
18574 + descriptor[1] = addr;
18575 +
18576 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18577 + if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) || addr >= TASK_SIZE_MAX) {
18578 + if (addr < TASK_SIZE_MAX)
18579 + descriptor[1] += pax_user_shadow_base;
18580 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
18581 + }
18582 +
18583 + descriptor[0] = PCID_USER;
18584 + descriptor[1] = addr;
18585 +#endif
18586 +
18587 + asm volatile(__ASM_INVPCID : : "d"(&descriptor), "a"(INVPCID_SINGLE_ADDRESS) : "memory");
18588 + return;
18589 + }
18590 +
18591 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18592 + if (static_cpu_has(X86_FEATURE_PCID)) {
18593 + unsigned int cpu = raw_get_cpu();
18594 +
18595 + native_write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
18596 + asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
18597 + native_write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
18598 + raw_put_cpu_no_resched();
18599 +
18600 + if (!static_cpu_has(X86_FEATURE_STRONGUDEREF) && addr < TASK_SIZE_MAX)
18601 + addr += pax_user_shadow_base;
18602 + }
18603 +#endif
18604 +
18605 asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
18606 }
18607
18608 diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
18609 index 5838fa9..f7ae572 100644
18610 --- a/arch/x86/include/asm/uaccess.h
18611 +++ b/arch/x86/include/asm/uaccess.h
18612 @@ -7,6 +7,7 @@
18613 #include <linux/compiler.h>
18614 #include <linux/thread_info.h>
18615 #include <linux/string.h>
18616 +#include <linux/sched.h>
18617 #include <asm/asm.h>
18618 #include <asm/page.h>
18619 #include <asm/smap.h>
18620 @@ -29,7 +30,12 @@
18621
18622 #define get_ds() (KERNEL_DS)
18623 #define get_fs() (current_thread_info()->addr_limit)
18624 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18625 +void __set_fs(mm_segment_t x);
18626 +void set_fs(mm_segment_t x);
18627 +#else
18628 #define set_fs(x) (current_thread_info()->addr_limit = (x))
18629 +#endif
18630
18631 #define segment_eq(a, b) ((a).seg == (b).seg)
18632
18633 @@ -77,8 +83,33 @@
18634 * checks that the pointer is in the user space range - after calling
18635 * this function, memory access functions may still return -EFAULT.
18636 */
18637 -#define access_ok(type, addr, size) \
18638 - (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
18639 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
18640 +#define access_ok(type, addr, size) \
18641 +({ \
18642 + long __size = size; \
18643 + unsigned long __addr = (unsigned long)addr; \
18644 + unsigned long __addr_ao = __addr & PAGE_MASK; \
18645 + unsigned long __end_ao = __addr + __size - 1; \
18646 + bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
18647 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
18648 + while(__addr_ao <= __end_ao) { \
18649 + char __c_ao; \
18650 + __addr_ao += PAGE_SIZE; \
18651 + if (__size > PAGE_SIZE) \
18652 + cond_resched(); \
18653 + if (__get_user(__c_ao, (char __user *)__addr)) \
18654 + break; \
18655 + if (type != VERIFY_WRITE) { \
18656 + __addr = __addr_ao; \
18657 + continue; \
18658 + } \
18659 + if (__put_user(__c_ao, (char __user *)__addr)) \
18660 + break; \
18661 + __addr = __addr_ao; \
18662 + } \
18663 + } \
18664 + __ret_ao; \
18665 +})
18666
18667 /*
18668 * The exception table consists of pairs of addresses relative to the
18669 @@ -168,10 +199,12 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
18670 register __inttype(*(ptr)) __val_gu asm("%"_ASM_DX); \
18671 __chk_user_ptr(ptr); \
18672 might_fault(); \
18673 + pax_open_userland(); \
18674 asm volatile("call __get_user_%P3" \
18675 : "=a" (__ret_gu), "=r" (__val_gu) \
18676 : "0" (ptr), "i" (sizeof(*(ptr)))); \
18677 (x) = (__typeof__(*(ptr))) __val_gu; \
18678 + pax_close_userland(); \
18679 __ret_gu; \
18680 })
18681
18682 @@ -179,13 +212,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
18683 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
18684 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
18685
18686 -
18687 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18688 +#define __copyuser_seg "gs;"
18689 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
18690 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
18691 +#else
18692 +#define __copyuser_seg
18693 +#define __COPYUSER_SET_ES
18694 +#define __COPYUSER_RESTORE_ES
18695 +#endif
18696
18697 #ifdef CONFIG_X86_32
18698 #define __put_user_asm_u64(x, addr, err, errret) \
18699 asm volatile(ASM_STAC "\n" \
18700 - "1: movl %%eax,0(%2)\n" \
18701 - "2: movl %%edx,4(%2)\n" \
18702 + "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
18703 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
18704 "3: " ASM_CLAC "\n" \
18705 ".section .fixup,\"ax\"\n" \
18706 "4: movl %3,%0\n" \
18707 @@ -198,8 +239,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
18708
18709 #define __put_user_asm_ex_u64(x, addr) \
18710 asm volatile(ASM_STAC "\n" \
18711 - "1: movl %%eax,0(%1)\n" \
18712 - "2: movl %%edx,4(%1)\n" \
18713 + "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
18714 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
18715 "3: " ASM_CLAC "\n" \
18716 _ASM_EXTABLE_EX(1b, 2b) \
18717 _ASM_EXTABLE_EX(2b, 3b) \
18718 @@ -249,7 +290,8 @@ extern void __put_user_8(void);
18719 __typeof__(*(ptr)) __pu_val; \
18720 __chk_user_ptr(ptr); \
18721 might_fault(); \
18722 - __pu_val = x; \
18723 + __pu_val = (x); \
18724 + pax_open_userland(); \
18725 switch (sizeof(*(ptr))) { \
18726 case 1: \
18727 __put_user_x(1, __pu_val, ptr, __ret_pu); \
18728 @@ -267,6 +309,7 @@ extern void __put_user_8(void);
18729 __put_user_x(X, __pu_val, ptr, __ret_pu); \
18730 break; \
18731 } \
18732 + pax_close_userland(); \
18733 __ret_pu; \
18734 })
18735
18736 @@ -347,8 +390,10 @@ do { \
18737 } while (0)
18738
18739 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
18740 +do { \
18741 + pax_open_userland(); \
18742 asm volatile(ASM_STAC "\n" \
18743 - "1: mov"itype" %2,%"rtype"1\n" \
18744 + "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
18745 "2: " ASM_CLAC "\n" \
18746 ".section .fixup,\"ax\"\n" \
18747 "3: mov %3,%0\n" \
18748 @@ -356,8 +401,10 @@ do { \
18749 " jmp 2b\n" \
18750 ".previous\n" \
18751 _ASM_EXTABLE(1b, 3b) \
18752 - : "=r" (err), ltype(x) \
18753 - : "m" (__m(addr)), "i" (errret), "0" (err))
18754 + : "=r" (err), ltype (x) \
18755 + : "m" (__m(addr)), "i" (errret), "0" (err)); \
18756 + pax_close_userland(); \
18757 +} while (0)
18758
18759 #define __get_user_size_ex(x, ptr, size) \
18760 do { \
18761 @@ -381,7 +428,7 @@ do { \
18762 } while (0)
18763
18764 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
18765 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
18766 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
18767 "2:\n" \
18768 _ASM_EXTABLE_EX(1b, 2b) \
18769 : ltype(x) : "m" (__m(addr)))
18770 @@ -398,13 +445,24 @@ do { \
18771 int __gu_err; \
18772 unsigned long __gu_val; \
18773 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
18774 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
18775 + (x) = (__typeof__(*(ptr)))__gu_val; \
18776 __gu_err; \
18777 })
18778
18779 /* FIXME: this hack is definitely wrong -AK */
18780 struct __large_struct { unsigned long buf[100]; };
18781 -#define __m(x) (*(struct __large_struct __user *)(x))
18782 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18783 +#define ____m(x) \
18784 +({ \
18785 + unsigned long ____x = (unsigned long)(x); \
18786 + if (____x < pax_user_shadow_base) \
18787 + ____x += pax_user_shadow_base; \
18788 + (typeof(x))____x; \
18789 +})
18790 +#else
18791 +#define ____m(x) (x)
18792 +#endif
18793 +#define __m(x) (*(struct __large_struct __user *)____m(x))
18794
18795 /*
18796 * Tell gcc we read from memory instead of writing: this is because
18797 @@ -412,8 +470,10 @@ struct __large_struct { unsigned long buf[100]; };
18798 * aliasing issues.
18799 */
18800 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
18801 +do { \
18802 + pax_open_userland(); \
18803 asm volatile(ASM_STAC "\n" \
18804 - "1: mov"itype" %"rtype"1,%2\n" \
18805 + "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
18806 "2: " ASM_CLAC "\n" \
18807 ".section .fixup,\"ax\"\n" \
18808 "3: mov %3,%0\n" \
18809 @@ -421,10 +481,12 @@ struct __large_struct { unsigned long buf[100]; };
18810 ".previous\n" \
18811 _ASM_EXTABLE(1b, 3b) \
18812 : "=r"(err) \
18813 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
18814 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err));\
18815 + pax_close_userland(); \
18816 +} while (0)
18817
18818 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
18819 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
18820 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
18821 "2:\n" \
18822 _ASM_EXTABLE_EX(1b, 2b) \
18823 : : ltype(x), "m" (__m(addr)))
18824 @@ -434,11 +496,13 @@ struct __large_struct { unsigned long buf[100]; };
18825 */
18826 #define uaccess_try do { \
18827 current_thread_info()->uaccess_err = 0; \
18828 + pax_open_userland(); \
18829 stac(); \
18830 barrier();
18831
18832 #define uaccess_catch(err) \
18833 clac(); \
18834 + pax_close_userland(); \
18835 (err) |= (current_thread_info()->uaccess_err ? -EFAULT : 0); \
18836 } while (0)
18837
18838 @@ -463,8 +527,12 @@ struct __large_struct { unsigned long buf[100]; };
18839 * On error, the variable @x is set to zero.
18840 */
18841
18842 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18843 +#define __get_user(x, ptr) get_user((x), (ptr))
18844 +#else
18845 #define __get_user(x, ptr) \
18846 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
18847 +#endif
18848
18849 /**
18850 * __put_user: - Write a simple value into user space, with less checking.
18851 @@ -486,8 +554,12 @@ struct __large_struct { unsigned long buf[100]; };
18852 * Returns zero on success, or -EFAULT on error.
18853 */
18854
18855 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18856 +#define __put_user(x, ptr) put_user((x), (ptr))
18857 +#else
18858 #define __put_user(x, ptr) \
18859 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
18860 +#endif
18861
18862 #define __get_user_unaligned __get_user
18863 #define __put_user_unaligned __put_user
18864 @@ -505,7 +577,7 @@ struct __large_struct { unsigned long buf[100]; };
18865 #define get_user_ex(x, ptr) do { \
18866 unsigned long __gue_val; \
18867 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
18868 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
18869 + (x) = (__typeof__(*(ptr)))__gue_val; \
18870 } while (0)
18871
18872 #define put_user_try uaccess_try
18873 diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
18874 index 7f760a9..b596b9a 100644
18875 --- a/arch/x86/include/asm/uaccess_32.h
18876 +++ b/arch/x86/include/asm/uaccess_32.h
18877 @@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
18878 static __always_inline unsigned long __must_check
18879 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
18880 {
18881 + if ((long)n < 0)
18882 + return n;
18883 +
18884 + check_object_size(from, n, true);
18885 +
18886 if (__builtin_constant_p(n)) {
18887 unsigned long ret;
18888
18889 @@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
18890 __copy_to_user(void __user *to, const void *from, unsigned long n)
18891 {
18892 might_fault();
18893 +
18894 return __copy_to_user_inatomic(to, from, n);
18895 }
18896
18897 static __always_inline unsigned long
18898 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
18899 {
18900 + if ((long)n < 0)
18901 + return n;
18902 +
18903 /* Avoid zeroing the tail if the copy fails..
18904 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
18905 * but as the zeroing behaviour is only significant when n is not
18906 @@ -137,6 +146,12 @@ static __always_inline unsigned long
18907 __copy_from_user(void *to, const void __user *from, unsigned long n)
18908 {
18909 might_fault();
18910 +
18911 + if ((long)n < 0)
18912 + return n;
18913 +
18914 + check_object_size(to, n, false);
18915 +
18916 if (__builtin_constant_p(n)) {
18917 unsigned long ret;
18918
18919 @@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
18920 const void __user *from, unsigned long n)
18921 {
18922 might_fault();
18923 +
18924 + if ((long)n < 0)
18925 + return n;
18926 +
18927 if (__builtin_constant_p(n)) {
18928 unsigned long ret;
18929
18930 @@ -181,15 +200,19 @@ static __always_inline unsigned long
18931 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
18932 unsigned long n)
18933 {
18934 - return __copy_from_user_ll_nocache_nozero(to, from, n);
18935 + if ((long)n < 0)
18936 + return n;
18937 +
18938 + return __copy_from_user_ll_nocache_nozero(to, from, n);
18939 }
18940
18941 -unsigned long __must_check copy_to_user(void __user *to,
18942 - const void *from, unsigned long n);
18943 -unsigned long __must_check _copy_from_user(void *to,
18944 - const void __user *from,
18945 - unsigned long n);
18946 -
18947 +extern void copy_to_user_overflow(void)
18948 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
18949 + __compiletime_error("copy_to_user() buffer size is not provably correct")
18950 +#else
18951 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
18952 +#endif
18953 +;
18954
18955 extern void copy_from_user_overflow(void)
18956 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
18957 @@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
18958 #endif
18959 ;
18960
18961 -static inline unsigned long __must_check copy_from_user(void *to,
18962 - const void __user *from,
18963 - unsigned long n)
18964 +/**
18965 + * copy_to_user: - Copy a block of data into user space.
18966 + * @to: Destination address, in user space.
18967 + * @from: Source address, in kernel space.
18968 + * @n: Number of bytes to copy.
18969 + *
18970 + * Context: User context only. This function may sleep.
18971 + *
18972 + * Copy data from kernel space to user space.
18973 + *
18974 + * Returns number of bytes that could not be copied.
18975 + * On success, this will be zero.
18976 + */
18977 +static inline unsigned long __must_check
18978 +copy_to_user(void __user *to, const void *from, unsigned long n)
18979 {
18980 - int sz = __compiletime_object_size(to);
18981 + size_t sz = __compiletime_object_size(from);
18982
18983 - if (likely(sz == -1 || sz >= n))
18984 - n = _copy_from_user(to, from, n);
18985 - else
18986 + if (unlikely(sz != (size_t)-1 && sz < n))
18987 + copy_to_user_overflow();
18988 + else if (access_ok(VERIFY_WRITE, to, n))
18989 + n = __copy_to_user(to, from, n);
18990 + return n;
18991 +}
18992 +
18993 +/**
18994 + * copy_from_user: - Copy a block of data from user space.
18995 + * @to: Destination address, in kernel space.
18996 + * @from: Source address, in user space.
18997 + * @n: Number of bytes to copy.
18998 + *
18999 + * Context: User context only. This function may sleep.
19000 + *
19001 + * Copy data from user space to kernel space.
19002 + *
19003 + * Returns number of bytes that could not be copied.
19004 + * On success, this will be zero.
19005 + *
19006 + * If some data could not be copied, this function will pad the copied
19007 + * data to the requested size using zero bytes.
19008 + */
19009 +static inline unsigned long __must_check
19010 +copy_from_user(void *to, const void __user *from, unsigned long n)
19011 +{
19012 + size_t sz = __compiletime_object_size(to);
19013 +
19014 + check_object_size(to, n, false);
19015 +
19016 + if (unlikely(sz != (size_t)-1 && sz < n))
19017 copy_from_user_overflow();
19018 -
19019 + else if (access_ok(VERIFY_READ, from, n))
19020 + n = __copy_from_user(to, from, n);
19021 + else if ((long)n > 0)
19022 + memset(to, 0, n);
19023 return n;
19024 }
19025
19026 diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
19027 index 4f7923d..201b58d 100644
19028 --- a/arch/x86/include/asm/uaccess_64.h
19029 +++ b/arch/x86/include/asm/uaccess_64.h
19030 @@ -10,6 +10,9 @@
19031 #include <asm/alternative.h>
19032 #include <asm/cpufeature.h>
19033 #include <asm/page.h>
19034 +#include <asm/pgtable.h>
19035 +
19036 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
19037
19038 /*
19039 * Copy To/From Userspace
19040 @@ -17,14 +20,14 @@
19041
19042 /* Handles exceptions in both to and from, but doesn't do access_ok */
19043 __must_check unsigned long
19044 -copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
19045 +copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
19046 __must_check unsigned long
19047 -copy_user_generic_string(void *to, const void *from, unsigned len);
19048 +copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
19049 __must_check unsigned long
19050 -copy_user_generic_unrolled(void *to, const void *from, unsigned len);
19051 +copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
19052
19053 -static __always_inline __must_check unsigned long
19054 -copy_user_generic(void *to, const void *from, unsigned len)
19055 +static __always_inline __must_check unsigned long
19056 +copy_user_generic(void *to, const void *from, unsigned long len)
19057 {
19058 unsigned ret;
19059
19060 @@ -45,138 +48,200 @@ copy_user_generic(void *to, const void *from, unsigned len)
19061 return ret;
19062 }
19063
19064 +static __always_inline __must_check unsigned long
19065 +__copy_to_user(void __user *to, const void *from, unsigned long len);
19066 +static __always_inline __must_check unsigned long
19067 +__copy_from_user(void *to, const void __user *from, unsigned long len);
19068 __must_check unsigned long
19069 -_copy_to_user(void __user *to, const void *from, unsigned len);
19070 -__must_check unsigned long
19071 -_copy_from_user(void *to, const void __user *from, unsigned len);
19072 -__must_check unsigned long
19073 -copy_in_user(void __user *to, const void __user *from, unsigned len);
19074 +copy_in_user(void __user *to, const void __user *from, unsigned long len);
19075 +
19076 +extern void copy_to_user_overflow(void)
19077 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19078 + __compiletime_error("copy_to_user() buffer size is not provably correct")
19079 +#else
19080 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
19081 +#endif
19082 +;
19083 +
19084 +extern void copy_from_user_overflow(void)
19085 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
19086 + __compiletime_error("copy_from_user() buffer size is not provably correct")
19087 +#else
19088 + __compiletime_warning("copy_from_user() buffer size is not provably correct")
19089 +#endif
19090 +;
19091
19092 static inline unsigned long __must_check copy_from_user(void *to,
19093 const void __user *from,
19094 unsigned long n)
19095 {
19096 - int sz = __compiletime_object_size(to);
19097 -
19098 might_fault();
19099 - if (likely(sz == -1 || sz >= n))
19100 - n = _copy_from_user(to, from, n);
19101 -#ifdef CONFIG_DEBUG_VM
19102 - else
19103 - WARN(1, "Buffer overflow detected!\n");
19104 -#endif
19105 +
19106 + check_object_size(to, n, false);
19107 +
19108 + if (access_ok(VERIFY_READ, from, n))
19109 + n = __copy_from_user(to, from, n);
19110 + else if (n < INT_MAX)
19111 + memset(to, 0, n);
19112 return n;
19113 }
19114
19115 static __always_inline __must_check
19116 -int copy_to_user(void __user *dst, const void *src, unsigned size)
19117 +int copy_to_user(void __user *dst, const void *src, unsigned long size)
19118 {
19119 might_fault();
19120
19121 - return _copy_to_user(dst, src, size);
19122 + if (access_ok(VERIFY_WRITE, dst, size))
19123 + size = __copy_to_user(dst, src, size);
19124 + return size;
19125 }
19126
19127 static __always_inline __must_check
19128 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
19129 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
19130 {
19131 - int ret = 0;
19132 + size_t sz = __compiletime_object_size(dst);
19133 + unsigned ret = 0;
19134
19135 might_fault();
19136 +
19137 + if (size > INT_MAX)
19138 + return size;
19139 +
19140 + check_object_size(dst, size, false);
19141 +
19142 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19143 + if (!__access_ok(VERIFY_READ, src, size))
19144 + return size;
19145 +#endif
19146 +
19147 + if (unlikely(sz != (size_t)-1 && sz < size)) {
19148 + copy_from_user_overflow();
19149 + return size;
19150 + }
19151 +
19152 if (!__builtin_constant_p(size))
19153 - return copy_user_generic(dst, (__force void *)src, size);
19154 + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
19155 switch (size) {
19156 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
19157 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
19158 ret, "b", "b", "=q", 1);
19159 return ret;
19160 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
19161 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
19162 ret, "w", "w", "=r", 2);
19163 return ret;
19164 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
19165 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
19166 ret, "l", "k", "=r", 4);
19167 return ret;
19168 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
19169 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19170 ret, "q", "", "=r", 8);
19171 return ret;
19172 case 10:
19173 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
19174 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19175 ret, "q", "", "=r", 10);
19176 if (unlikely(ret))
19177 return ret;
19178 __get_user_asm(*(u16 *)(8 + (char *)dst),
19179 - (u16 __user *)(8 + (char __user *)src),
19180 + (const u16 __user *)(8 + (const char __user *)src),
19181 ret, "w", "w", "=r", 2);
19182 return ret;
19183 case 16:
19184 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
19185 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
19186 ret, "q", "", "=r", 16);
19187 if (unlikely(ret))
19188 return ret;
19189 __get_user_asm(*(u64 *)(8 + (char *)dst),
19190 - (u64 __user *)(8 + (char __user *)src),
19191 + (const u64 __user *)(8 + (const char __user *)src),
19192 ret, "q", "", "=r", 8);
19193 return ret;
19194 default:
19195 - return copy_user_generic(dst, (__force void *)src, size);
19196 + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
19197 }
19198 }
19199
19200 static __always_inline __must_check
19201 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
19202 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
19203 {
19204 - int ret = 0;
19205 + size_t sz = __compiletime_object_size(src);
19206 + unsigned ret = 0;
19207
19208 might_fault();
19209 +
19210 + if (size > INT_MAX)
19211 + return size;
19212 +
19213 + check_object_size(src, size, true);
19214 +
19215 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19216 + if (!__access_ok(VERIFY_WRITE, dst, size))
19217 + return size;
19218 +#endif
19219 +
19220 + if (unlikely(sz != (size_t)-1 && sz < size)) {
19221 + copy_to_user_overflow();
19222 + return size;
19223 + }
19224 +
19225 if (!__builtin_constant_p(size))
19226 - return copy_user_generic((__force void *)dst, src, size);
19227 + return copy_user_generic((__force_kernel void *)____m(dst), src, size);
19228 switch (size) {
19229 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
19230 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
19231 ret, "b", "b", "iq", 1);
19232 return ret;
19233 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
19234 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
19235 ret, "w", "w", "ir", 2);
19236 return ret;
19237 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
19238 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
19239 ret, "l", "k", "ir", 4);
19240 return ret;
19241 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
19242 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19243 ret, "q", "", "er", 8);
19244 return ret;
19245 case 10:
19246 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
19247 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19248 ret, "q", "", "er", 10);
19249 if (unlikely(ret))
19250 return ret;
19251 asm("":::"memory");
19252 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
19253 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
19254 ret, "w", "w", "ir", 2);
19255 return ret;
19256 case 16:
19257 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
19258 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
19259 ret, "q", "", "er", 16);
19260 if (unlikely(ret))
19261 return ret;
19262 asm("":::"memory");
19263 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
19264 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
19265 ret, "q", "", "er", 8);
19266 return ret;
19267 default:
19268 - return copy_user_generic((__force void *)dst, src, size);
19269 + return copy_user_generic((__force_kernel void *)____m(dst), src, size);
19270 }
19271 }
19272
19273 static __always_inline __must_check
19274 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19275 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
19276 {
19277 - int ret = 0;
19278 + unsigned ret = 0;
19279
19280 might_fault();
19281 +
19282 + if (size > INT_MAX)
19283 + return size;
19284 +
19285 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19286 + if (!__access_ok(VERIFY_READ, src, size))
19287 + return size;
19288 + if (!__access_ok(VERIFY_WRITE, dst, size))
19289 + return size;
19290 +#endif
19291 +
19292 if (!__builtin_constant_p(size))
19293 - return copy_user_generic((__force void *)dst,
19294 - (__force void *)src, size);
19295 + return copy_user_generic((__force_kernel void *)____m(dst),
19296 + (__force_kernel const void *)____m(src), size);
19297 switch (size) {
19298 case 1: {
19299 u8 tmp;
19300 - __get_user_asm(tmp, (u8 __user *)src,
19301 + __get_user_asm(tmp, (const u8 __user *)src,
19302 ret, "b", "b", "=q", 1);
19303 if (likely(!ret))
19304 __put_user_asm(tmp, (u8 __user *)dst,
19305 @@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19306 }
19307 case 2: {
19308 u16 tmp;
19309 - __get_user_asm(tmp, (u16 __user *)src,
19310 + __get_user_asm(tmp, (const u16 __user *)src,
19311 ret, "w", "w", "=r", 2);
19312 if (likely(!ret))
19313 __put_user_asm(tmp, (u16 __user *)dst,
19314 @@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19315
19316 case 4: {
19317 u32 tmp;
19318 - __get_user_asm(tmp, (u32 __user *)src,
19319 + __get_user_asm(tmp, (const u32 __user *)src,
19320 ret, "l", "k", "=r", 4);
19321 if (likely(!ret))
19322 __put_user_asm(tmp, (u32 __user *)dst,
19323 @@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19324 }
19325 case 8: {
19326 u64 tmp;
19327 - __get_user_asm(tmp, (u64 __user *)src,
19328 + __get_user_asm(tmp, (const u64 __user *)src,
19329 ret, "q", "", "=r", 8);
19330 if (likely(!ret))
19331 __put_user_asm(tmp, (u64 __user *)dst,
19332 @@ -212,41 +277,74 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
19333 return ret;
19334 }
19335 default:
19336 - return copy_user_generic((__force void *)dst,
19337 - (__force void *)src, size);
19338 + return copy_user_generic((__force_kernel void *)____m(dst),
19339 + (__force_kernel const void *)____m(src), size);
19340 }
19341 }
19342
19343 -static __must_check __always_inline int
19344 -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
19345 +static __must_check __always_inline unsigned long
19346 +__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
19347 {
19348 - return copy_user_generic(dst, (__force const void *)src, size);
19349 + if (size > INT_MAX)
19350 + return size;
19351 +
19352 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19353 + if (!__access_ok(VERIFY_READ, src, size))
19354 + return size;
19355 +#endif
19356 +
19357 + return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
19358 }
19359
19360 -static __must_check __always_inline int
19361 -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
19362 +static __must_check __always_inline unsigned long
19363 +__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
19364 {
19365 - return copy_user_generic((__force void *)dst, src, size);
19366 + if (size > INT_MAX)
19367 + return size;
19368 +
19369 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19370 + if (!__access_ok(VERIFY_WRITE, dst, size))
19371 + return size;
19372 +#endif
19373 +
19374 + return copy_user_generic((__force_kernel void *)____m(dst), src, size);
19375 }
19376
19377 -extern long __copy_user_nocache(void *dst, const void __user *src,
19378 - unsigned size, int zerorest);
19379 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
19380 + unsigned long size, int zerorest);
19381
19382 -static inline int
19383 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
19384 +static inline unsigned long
19385 +__copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
19386 {
19387 might_fault();
19388 +
19389 + if (size > INT_MAX)
19390 + return size;
19391 +
19392 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19393 + if (!__access_ok(VERIFY_READ, src, size))
19394 + return size;
19395 +#endif
19396 +
19397 return __copy_user_nocache(dst, src, size, 1);
19398 }
19399
19400 -static inline int
19401 +static inline unsigned long
19402 __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
19403 - unsigned size)
19404 + unsigned long size)
19405 {
19406 + if (size > INT_MAX)
19407 + return size;
19408 +
19409 +#ifdef CONFIG_PAX_MEMORY_UDEREF
19410 + if (!__access_ok(VERIFY_READ, src, size))
19411 + return size;
19412 +#endif
19413 +
19414 return __copy_user_nocache(dst, src, size, 0);
19415 }
19416
19417 unsigned long
19418 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
19419 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
19420
19421 #endif /* _ASM_X86_UACCESS_64_H */
19422 diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
19423 index 5b238981..77fdd78 100644
19424 --- a/arch/x86/include/asm/word-at-a-time.h
19425 +++ b/arch/x86/include/asm/word-at-a-time.h
19426 @@ -11,7 +11,7 @@
19427 * and shift, for example.
19428 */
19429 struct word_at_a_time {
19430 - const unsigned long one_bits, high_bits;
19431 + unsigned long one_bits, high_bits;
19432 };
19433
19434 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
19435 diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
19436 index 828a156..650e625 100644
19437 --- a/arch/x86/include/asm/x86_init.h
19438 +++ b/arch/x86/include/asm/x86_init.h
19439 @@ -129,7 +129,7 @@ struct x86_init_ops {
19440 struct x86_init_timers timers;
19441 struct x86_init_iommu iommu;
19442 struct x86_init_pci pci;
19443 -};
19444 +} __no_const;
19445
19446 /**
19447 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
19448 @@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
19449 void (*setup_percpu_clockev)(void);
19450 void (*early_percpu_clock_init)(void);
19451 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
19452 -};
19453 +} __no_const;
19454
19455 struct timespec;
19456
19457 @@ -168,7 +168,7 @@ struct x86_platform_ops {
19458 void (*save_sched_clock_state)(void);
19459 void (*restore_sched_clock_state)(void);
19460 void (*apic_post_init)(void);
19461 -};
19462 +} __no_const;
19463
19464 struct pci_dev;
19465 struct msi_msg;
19466 @@ -182,7 +182,7 @@ struct x86_msi_ops {
19467 void (*teardown_msi_irqs)(struct pci_dev *dev);
19468 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
19469 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
19470 -};
19471 +} __no_const;
19472
19473 struct IO_APIC_route_entry;
19474 struct io_apic_irq_attr;
19475 @@ -203,7 +203,7 @@ struct x86_io_apic_ops {
19476 unsigned int destination, int vector,
19477 struct io_apic_irq_attr *attr);
19478 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
19479 -};
19480 +} __no_const;
19481
19482 extern struct x86_init_ops x86_init;
19483 extern struct x86_cpuinit_ops x86_cpuinit;
19484 diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
19485 index b913915..4f5a581 100644
19486 --- a/arch/x86/include/asm/xen/page.h
19487 +++ b/arch/x86/include/asm/xen/page.h
19488 @@ -56,7 +56,7 @@ extern int m2p_remove_override(struct page *page,
19489 extern struct page *m2p_find_override(unsigned long mfn);
19490 extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
19491
19492 -static inline unsigned long pfn_to_mfn(unsigned long pfn)
19493 +static inline unsigned long __intentional_overflow(-1) pfn_to_mfn(unsigned long pfn)
19494 {
19495 unsigned long mfn;
19496
19497 diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
19498 index 0415cda..3b22adc 100644
19499 --- a/arch/x86/include/asm/xsave.h
19500 +++ b/arch/x86/include/asm/xsave.h
19501 @@ -70,8 +70,11 @@ static inline int xsave_user(struct xsave_struct __user *buf)
19502 if (unlikely(err))
19503 return -EFAULT;
19504
19505 + pax_open_userland();
19506 __asm__ __volatile__(ASM_STAC "\n"
19507 - "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
19508 + "1:"
19509 + __copyuser_seg
19510 + ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
19511 "2: " ASM_CLAC "\n"
19512 ".section .fixup,\"ax\"\n"
19513 "3: movl $-1,%[err]\n"
19514 @@ -81,18 +84,22 @@ static inline int xsave_user(struct xsave_struct __user *buf)
19515 : [err] "=r" (err)
19516 : "D" (buf), "a" (-1), "d" (-1), "0" (0)
19517 : "memory");
19518 + pax_close_userland();
19519 return err;
19520 }
19521
19522 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
19523 {
19524 int err;
19525 - struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
19526 + struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
19527 u32 lmask = mask;
19528 u32 hmask = mask >> 32;
19529
19530 + pax_open_userland();
19531 __asm__ __volatile__(ASM_STAC "\n"
19532 - "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
19533 + "1:"
19534 + __copyuser_seg
19535 + ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
19536 "2: " ASM_CLAC "\n"
19537 ".section .fixup,\"ax\"\n"
19538 "3: movl $-1,%[err]\n"
19539 @@ -102,6 +109,7 @@ static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
19540 : [err] "=r" (err)
19541 : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
19542 : "memory"); /* memory required? */
19543 + pax_close_userland();
19544 return err;
19545 }
19546
19547 diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
19548 index bbae024..e1528f9 100644
19549 --- a/arch/x86/include/uapi/asm/e820.h
19550 +++ b/arch/x86/include/uapi/asm/e820.h
19551 @@ -63,7 +63,7 @@ struct e820map {
19552 #define ISA_START_ADDRESS 0xa0000
19553 #define ISA_END_ADDRESS 0x100000
19554
19555 -#define BIOS_BEGIN 0x000a0000
19556 +#define BIOS_BEGIN 0x000c0000
19557 #define BIOS_END 0x00100000
19558
19559 #define BIOS_ROM_BASE 0xffe00000
19560 diff --git a/arch/x86/include/uapi/asm/ptrace-abi.h b/arch/x86/include/uapi/asm/ptrace-abi.h
19561 index 7b0a55a..ad115bf 100644
19562 --- a/arch/x86/include/uapi/asm/ptrace-abi.h
19563 +++ b/arch/x86/include/uapi/asm/ptrace-abi.h
19564 @@ -49,7 +49,6 @@
19565 #define EFLAGS 144
19566 #define RSP 152
19567 #define SS 160
19568 -#define ARGOFFSET R11
19569 #endif /* __ASSEMBLY__ */
19570
19571 /* top of stack page */
19572 diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
19573 index a5408b9..5133813 100644
19574 --- a/arch/x86/kernel/Makefile
19575 +++ b/arch/x86/kernel/Makefile
19576 @@ -24,7 +24,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
19577 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
19578 obj-$(CONFIG_IRQ_WORK) += irq_work.o
19579 obj-y += probe_roms.o
19580 -obj-$(CONFIG_X86_32) += i386_ksyms_32.o
19581 +obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
19582 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
19583 obj-y += syscall_$(BITS).o
19584 obj-$(CONFIG_X86_64) += vsyscall_64.o
19585 diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
19586 index 40c7660..f709f4b 100644
19587 --- a/arch/x86/kernel/acpi/boot.c
19588 +++ b/arch/x86/kernel/acpi/boot.c
19589 @@ -1365,7 +1365,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
19590 * If your system is blacklisted here, but you find that acpi=force
19591 * works for you, please contact linux-acpi@vger.kernel.org
19592 */
19593 -static struct dmi_system_id __initdata acpi_dmi_table[] = {
19594 +static const struct dmi_system_id __initconst acpi_dmi_table[] = {
19595 /*
19596 * Boxes that need ACPI disabled
19597 */
19598 @@ -1440,7 +1440,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
19599 };
19600
19601 /* second table for DMI checks that should run after early-quirks */
19602 -static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
19603 +static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
19604 /*
19605 * HP laptops which use a DSDT reporting as HP/SB400/10000,
19606 * which includes some code which overrides all temperature
19607 diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
19608 index 3312010..a65ca7b 100644
19609 --- a/arch/x86/kernel/acpi/sleep.c
19610 +++ b/arch/x86/kernel/acpi/sleep.c
19611 @@ -88,8 +88,12 @@ int x86_acpi_suspend_lowlevel(void)
19612 #else /* CONFIG_64BIT */
19613 #ifdef CONFIG_SMP
19614 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
19615 +
19616 + pax_open_kernel();
19617 early_gdt_descr.address =
19618 (unsigned long)get_cpu_gdt_table(smp_processor_id());
19619 + pax_close_kernel();
19620 +
19621 initial_gs = per_cpu_offset(smp_processor_id());
19622 #endif
19623 initial_code = (unsigned long)wakeup_long64;
19624 diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
19625 index d1daa66..59fecba 100644
19626 --- a/arch/x86/kernel/acpi/wakeup_32.S
19627 +++ b/arch/x86/kernel/acpi/wakeup_32.S
19628 @@ -29,13 +29,11 @@ wakeup_pmode_return:
19629 # and restore the stack ... but you need gdt for this to work
19630 movl saved_context_esp, %esp
19631
19632 - movl %cs:saved_magic, %eax
19633 - cmpl $0x12345678, %eax
19634 + cmpl $0x12345678, saved_magic
19635 jne bogus_magic
19636
19637 # jump to place where we left off
19638 - movl saved_eip, %eax
19639 - jmp *%eax
19640 + jmp *(saved_eip)
19641
19642 bogus_magic:
19643 jmp bogus_magic
19644 diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
19645 index 15e8563..323cbe1 100644
19646 --- a/arch/x86/kernel/alternative.c
19647 +++ b/arch/x86/kernel/alternative.c
19648 @@ -269,6 +269,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
19649 */
19650 for (a = start; a < end; a++) {
19651 instr = (u8 *)&a->instr_offset + a->instr_offset;
19652 +
19653 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19654 + instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19655 + if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
19656 + instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19657 +#endif
19658 +
19659 replacement = (u8 *)&a->repl_offset + a->repl_offset;
19660 BUG_ON(a->replacementlen > a->instrlen);
19661 BUG_ON(a->instrlen > sizeof(insnbuf));
19662 @@ -300,10 +307,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
19663 for (poff = start; poff < end; poff++) {
19664 u8 *ptr = (u8 *)poff + *poff;
19665
19666 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19667 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19668 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
19669 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19670 +#endif
19671 +
19672 if (!*poff || ptr < text || ptr >= text_end)
19673 continue;
19674 /* turn DS segment override prefix into lock prefix */
19675 - if (*ptr == 0x3e)
19676 + if (*ktla_ktva(ptr) == 0x3e)
19677 text_poke(ptr, ((unsigned char []){0xf0}), 1);
19678 }
19679 mutex_unlock(&text_mutex);
19680 @@ -318,10 +331,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
19681 for (poff = start; poff < end; poff++) {
19682 u8 *ptr = (u8 *)poff + *poff;
19683
19684 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
19685 + ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19686 + if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
19687 + ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
19688 +#endif
19689 +
19690 if (!*poff || ptr < text || ptr >= text_end)
19691 continue;
19692 /* turn lock prefix into DS segment override prefix */
19693 - if (*ptr == 0xf0)
19694 + if (*ktla_ktva(ptr) == 0xf0)
19695 text_poke(ptr, ((unsigned char []){0x3E}), 1);
19696 }
19697 mutex_unlock(&text_mutex);
19698 @@ -469,7 +488,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
19699
19700 BUG_ON(p->len > MAX_PATCH_LEN);
19701 /* prep the buffer with the original instructions */
19702 - memcpy(insnbuf, p->instr, p->len);
19703 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
19704 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
19705 (unsigned long)p->instr, p->len);
19706
19707 @@ -516,7 +535,7 @@ void __init alternative_instructions(void)
19708 if (!uniproc_patched || num_possible_cpus() == 1)
19709 free_init_pages("SMP alternatives",
19710 (unsigned long)__smp_locks,
19711 - (unsigned long)__smp_locks_end);
19712 + PAGE_ALIGN((unsigned long)__smp_locks_end));
19713 #endif
19714
19715 apply_paravirt(__parainstructions, __parainstructions_end);
19716 @@ -536,13 +555,17 @@ void __init alternative_instructions(void)
19717 * instructions. And on the local CPU you need to be protected again NMI or MCE
19718 * handlers seeing an inconsistent instruction while you patch.
19719 */
19720 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
19721 +void *__kprobes text_poke_early(void *addr, const void *opcode,
19722 size_t len)
19723 {
19724 unsigned long flags;
19725 local_irq_save(flags);
19726 - memcpy(addr, opcode, len);
19727 +
19728 + pax_open_kernel();
19729 + memcpy(ktla_ktva(addr), opcode, len);
19730 sync_core();
19731 + pax_close_kernel();
19732 +
19733 local_irq_restore(flags);
19734 /* Could also do a CLFLUSH here to speed up CPU recovery; but
19735 that causes hangs on some VIA CPUs. */
19736 @@ -564,36 +587,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
19737 */
19738 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
19739 {
19740 - unsigned long flags;
19741 - char *vaddr;
19742 + unsigned char *vaddr = ktla_ktva(addr);
19743 struct page *pages[2];
19744 - int i;
19745 + size_t i;
19746
19747 if (!core_kernel_text((unsigned long)addr)) {
19748 - pages[0] = vmalloc_to_page(addr);
19749 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
19750 + pages[0] = vmalloc_to_page(vaddr);
19751 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
19752 } else {
19753 - pages[0] = virt_to_page(addr);
19754 + pages[0] = virt_to_page(vaddr);
19755 WARN_ON(!PageReserved(pages[0]));
19756 - pages[1] = virt_to_page(addr + PAGE_SIZE);
19757 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
19758 }
19759 BUG_ON(!pages[0]);
19760 - local_irq_save(flags);
19761 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
19762 - if (pages[1])
19763 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
19764 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
19765 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
19766 - clear_fixmap(FIX_TEXT_POKE0);
19767 - if (pages[1])
19768 - clear_fixmap(FIX_TEXT_POKE1);
19769 - local_flush_tlb();
19770 - sync_core();
19771 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
19772 - that causes hangs on some VIA CPUs. */
19773 + text_poke_early(addr, opcode, len);
19774 for (i = 0; i < len; i++)
19775 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
19776 - local_irq_restore(flags);
19777 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
19778 return addr;
19779 }
19780
19781 @@ -613,7 +622,7 @@ int poke_int3_handler(struct pt_regs *regs)
19782 if (likely(!bp_patching_in_progress))
19783 return 0;
19784
19785 - if (user_mode_vm(regs) || regs->ip != (unsigned long)bp_int3_addr)
19786 + if (user_mode(regs) || regs->ip != (unsigned long)bp_int3_addr)
19787 return 0;
19788
19789 /* set up the specified breakpoint handler */
19790 @@ -647,7 +656,7 @@ int poke_int3_handler(struct pt_regs *regs)
19791 */
19792 void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler)
19793 {
19794 - unsigned char int3 = 0xcc;
19795 + const unsigned char int3 = 0xcc;
19796
19797 bp_int3_handler = handler;
19798 bp_int3_addr = (u8 *)addr + sizeof(int3);
19799 diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
19800 index a7eb82d..f6e52d4 100644
19801 --- a/arch/x86/kernel/apic/apic.c
19802 +++ b/arch/x86/kernel/apic/apic.c
19803 @@ -190,7 +190,7 @@ int first_system_vector = 0xfe;
19804 /*
19805 * Debug level, exported for io_apic.c
19806 */
19807 -unsigned int apic_verbosity;
19808 +int apic_verbosity;
19809
19810 int pic_mode;
19811
19812 @@ -1985,7 +1985,7 @@ static inline void __smp_error_interrupt(struct pt_regs *regs)
19813 apic_write(APIC_ESR, 0);
19814 v1 = apic_read(APIC_ESR);
19815 ack_APIC_irq();
19816 - atomic_inc(&irq_err_count);
19817 + atomic_inc_unchecked(&irq_err_count);
19818
19819 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
19820 smp_processor_id(), v0 , v1);
19821 diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
19822 index 00c77cf..2dc6a2d 100644
19823 --- a/arch/x86/kernel/apic/apic_flat_64.c
19824 +++ b/arch/x86/kernel/apic/apic_flat_64.c
19825 @@ -157,7 +157,7 @@ static int flat_probe(void)
19826 return 1;
19827 }
19828
19829 -static struct apic apic_flat = {
19830 +static struct apic apic_flat __read_only = {
19831 .name = "flat",
19832 .probe = flat_probe,
19833 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
19834 @@ -271,7 +271,7 @@ static int physflat_probe(void)
19835 return 0;
19836 }
19837
19838 -static struct apic apic_physflat = {
19839 +static struct apic apic_physflat __read_only = {
19840
19841 .name = "physical flat",
19842 .probe = physflat_probe,
19843 diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
19844 index e145f28..2752888 100644
19845 --- a/arch/x86/kernel/apic/apic_noop.c
19846 +++ b/arch/x86/kernel/apic/apic_noop.c
19847 @@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
19848 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
19849 }
19850
19851 -struct apic apic_noop = {
19852 +struct apic apic_noop __read_only = {
19853 .name = "noop",
19854 .probe = noop_probe,
19855 .acpi_madt_oem_check = NULL,
19856 diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
19857 index d50e364..543bee3 100644
19858 --- a/arch/x86/kernel/apic/bigsmp_32.c
19859 +++ b/arch/x86/kernel/apic/bigsmp_32.c
19860 @@ -152,7 +152,7 @@ static int probe_bigsmp(void)
19861 return dmi_bigsmp;
19862 }
19863
19864 -static struct apic apic_bigsmp = {
19865 +static struct apic apic_bigsmp __read_only = {
19866
19867 .name = "bigsmp",
19868 .probe = probe_bigsmp,
19869 diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
19870 index c552247..587a316 100644
19871 --- a/arch/x86/kernel/apic/es7000_32.c
19872 +++ b/arch/x86/kernel/apic/es7000_32.c
19873 @@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
19874 return ret && es7000_apic_is_cluster();
19875 }
19876
19877 -/* We've been warned by a false positive warning.Use __refdata to keep calm. */
19878 -static struct apic __refdata apic_es7000_cluster = {
19879 +static struct apic apic_es7000_cluster __read_only = {
19880
19881 .name = "es7000",
19882 .probe = probe_es7000,
19883 @@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
19884 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
19885 };
19886
19887 -static struct apic __refdata apic_es7000 = {
19888 +static struct apic apic_es7000 __read_only = {
19889
19890 .name = "es7000",
19891 .probe = probe_es7000,
19892 diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
19893 index e63a5bd..c0babf8 100644
19894 --- a/arch/x86/kernel/apic/io_apic.c
19895 +++ b/arch/x86/kernel/apic/io_apic.c
19896 @@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
19897 }
19898 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
19899
19900 -void lock_vector_lock(void)
19901 +void lock_vector_lock(void) __acquires(vector_lock)
19902 {
19903 /* Used to the online set of cpus does not change
19904 * during assign_irq_vector.
19905 @@ -1068,7 +1068,7 @@ void lock_vector_lock(void)
19906 raw_spin_lock(&vector_lock);
19907 }
19908
19909 -void unlock_vector_lock(void)
19910 +void unlock_vector_lock(void) __releases(vector_lock)
19911 {
19912 raw_spin_unlock(&vector_lock);
19913 }
19914 @@ -2367,7 +2367,7 @@ static void ack_apic_edge(struct irq_data *data)
19915 ack_APIC_irq();
19916 }
19917
19918 -atomic_t irq_mis_count;
19919 +atomic_unchecked_t irq_mis_count;
19920
19921 #ifdef CONFIG_GENERIC_PENDING_IRQ
19922 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
19923 @@ -2508,7 +2508,7 @@ static void ack_apic_level(struct irq_data *data)
19924 * at the cpu.
19925 */
19926 if (!(v & (1 << (i & 0x1f)))) {
19927 - atomic_inc(&irq_mis_count);
19928 + atomic_inc_unchecked(&irq_mis_count);
19929
19930 eoi_ioapic_irq(irq, cfg);
19931 }
19932 diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
19933 index 1e42e8f..daacf44 100644
19934 --- a/arch/x86/kernel/apic/numaq_32.c
19935 +++ b/arch/x86/kernel/apic/numaq_32.c
19936 @@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
19937 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
19938 }
19939
19940 -/* Use __refdata to keep false positive warning calm. */
19941 -static struct apic __refdata apic_numaq = {
19942 +static struct apic apic_numaq __read_only = {
19943
19944 .name = "NUMAQ",
19945 .probe = probe_numaq,
19946 diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
19947 index eb35ef9..f184a21 100644
19948 --- a/arch/x86/kernel/apic/probe_32.c
19949 +++ b/arch/x86/kernel/apic/probe_32.c
19950 @@ -72,7 +72,7 @@ static int probe_default(void)
19951 return 1;
19952 }
19953
19954 -static struct apic apic_default = {
19955 +static struct apic apic_default __read_only = {
19956
19957 .name = "default",
19958 .probe = probe_default,
19959 diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
19960 index 77c95c0..434f8a4 100644
19961 --- a/arch/x86/kernel/apic/summit_32.c
19962 +++ b/arch/x86/kernel/apic/summit_32.c
19963 @@ -486,7 +486,7 @@ void setup_summit(void)
19964 }
19965 #endif
19966
19967 -static struct apic apic_summit = {
19968 +static struct apic apic_summit __read_only = {
19969
19970 .name = "summit",
19971 .probe = probe_summit,
19972 diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
19973 index 140e29d..d88bc95 100644
19974 --- a/arch/x86/kernel/apic/x2apic_cluster.c
19975 +++ b/arch/x86/kernel/apic/x2apic_cluster.c
19976 @@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
19977 return notifier_from_errno(err);
19978 }
19979
19980 -static struct notifier_block __refdata x2apic_cpu_notifier = {
19981 +static struct notifier_block x2apic_cpu_notifier = {
19982 .notifier_call = update_clusterinfo,
19983 };
19984
19985 @@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
19986 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
19987 }
19988
19989 -static struct apic apic_x2apic_cluster = {
19990 +static struct apic apic_x2apic_cluster __read_only = {
19991
19992 .name = "cluster x2apic",
19993 .probe = x2apic_cluster_probe,
19994 diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
19995 index 562a76d..a003c0f 100644
19996 --- a/arch/x86/kernel/apic/x2apic_phys.c
19997 +++ b/arch/x86/kernel/apic/x2apic_phys.c
19998 @@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
19999 return apic == &apic_x2apic_phys;
20000 }
20001
20002 -static struct apic apic_x2apic_phys = {
20003 +static struct apic apic_x2apic_phys __read_only = {
20004
20005 .name = "physical x2apic",
20006 .probe = x2apic_phys_probe,
20007 diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
20008 index a419814..1dd34a0 100644
20009 --- a/arch/x86/kernel/apic/x2apic_uv_x.c
20010 +++ b/arch/x86/kernel/apic/x2apic_uv_x.c
20011 @@ -357,7 +357,7 @@ static int uv_probe(void)
20012 return apic == &apic_x2apic_uv_x;
20013 }
20014
20015 -static struct apic __refdata apic_x2apic_uv_x = {
20016 +static struct apic apic_x2apic_uv_x __read_only = {
20017
20018 .name = "UV large system",
20019 .probe = uv_probe,
20020 diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
20021 index 3ab0343..814c4787 100644
20022 --- a/arch/x86/kernel/apm_32.c
20023 +++ b/arch/x86/kernel/apm_32.c
20024 @@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
20025 * This is for buggy BIOS's that refer to (real mode) segment 0x40
20026 * even though they are called in protected mode.
20027 */
20028 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
20029 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
20030 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
20031
20032 static const char driver_version[] = "1.16ac"; /* no spaces */
20033 @@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
20034 BUG_ON(cpu != 0);
20035 gdt = get_cpu_gdt_table(cpu);
20036 save_desc_40 = gdt[0x40 / 8];
20037 +
20038 + pax_open_kernel();
20039 gdt[0x40 / 8] = bad_bios_desc;
20040 + pax_close_kernel();
20041
20042 apm_irq_save(flags);
20043 APM_DO_SAVE_SEGS;
20044 @@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
20045 &call->esi);
20046 APM_DO_RESTORE_SEGS;
20047 apm_irq_restore(flags);
20048 +
20049 + pax_open_kernel();
20050 gdt[0x40 / 8] = save_desc_40;
20051 + pax_close_kernel();
20052 +
20053 put_cpu();
20054
20055 return call->eax & 0xff;
20056 @@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
20057 BUG_ON(cpu != 0);
20058 gdt = get_cpu_gdt_table(cpu);
20059 save_desc_40 = gdt[0x40 / 8];
20060 +
20061 + pax_open_kernel();
20062 gdt[0x40 / 8] = bad_bios_desc;
20063 + pax_close_kernel();
20064
20065 apm_irq_save(flags);
20066 APM_DO_SAVE_SEGS;
20067 @@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
20068 &call->eax);
20069 APM_DO_RESTORE_SEGS;
20070 apm_irq_restore(flags);
20071 +
20072 + pax_open_kernel();
20073 gdt[0x40 / 8] = save_desc_40;
20074 + pax_close_kernel();
20075 +
20076 put_cpu();
20077 return error;
20078 }
20079 @@ -2362,12 +2376,15 @@ static int __init apm_init(void)
20080 * code to that CPU.
20081 */
20082 gdt = get_cpu_gdt_table(0);
20083 +
20084 + pax_open_kernel();
20085 set_desc_base(&gdt[APM_CS >> 3],
20086 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
20087 set_desc_base(&gdt[APM_CS_16 >> 3],
20088 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
20089 set_desc_base(&gdt[APM_DS >> 3],
20090 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
20091 + pax_close_kernel();
20092
20093 proc_create("apm", 0, NULL, &apm_file_ops);
20094
20095 diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
20096 index 2861082..6d4718e 100644
20097 --- a/arch/x86/kernel/asm-offsets.c
20098 +++ b/arch/x86/kernel/asm-offsets.c
20099 @@ -33,6 +33,8 @@ void common(void) {
20100 OFFSET(TI_status, thread_info, status);
20101 OFFSET(TI_addr_limit, thread_info, addr_limit);
20102 OFFSET(TI_preempt_count, thread_info, preempt_count);
20103 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
20104 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
20105
20106 BLANK();
20107 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
20108 @@ -53,8 +55,26 @@ void common(void) {
20109 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
20110 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
20111 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
20112 +
20113 +#ifdef CONFIG_PAX_KERNEXEC
20114 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
20115 #endif
20116
20117 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20118 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
20119 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
20120 +#ifdef CONFIG_X86_64
20121 + OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
20122 +#endif
20123 +#endif
20124 +
20125 +#endif
20126 +
20127 + BLANK();
20128 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
20129 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
20130 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
20131 +
20132 #ifdef CONFIG_XEN
20133 BLANK();
20134 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
20135 diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
20136 index e7c798b..2b2019b 100644
20137 --- a/arch/x86/kernel/asm-offsets_64.c
20138 +++ b/arch/x86/kernel/asm-offsets_64.c
20139 @@ -77,6 +77,7 @@ int main(void)
20140 BLANK();
20141 #undef ENTRY
20142
20143 + DEFINE(TSS_size, sizeof(struct tss_struct));
20144 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
20145 BLANK();
20146
20147 diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
20148 index 47b56a7..efc2bc6 100644
20149 --- a/arch/x86/kernel/cpu/Makefile
20150 +++ b/arch/x86/kernel/cpu/Makefile
20151 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
20152 CFLAGS_REMOVE_perf_event.o = -pg
20153 endif
20154
20155 -# Make sure load_percpu_segment has no stackprotector
20156 -nostackp := $(call cc-option, -fno-stack-protector)
20157 -CFLAGS_common.o := $(nostackp)
20158 -
20159 obj-y := intel_cacheinfo.o scattered.o topology.o
20160 obj-y += proc.o capflags.o powerflags.o common.o
20161 obj-y += rdrand.o
20162 diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
20163 index 903a264..fc955f3 100644
20164 --- a/arch/x86/kernel/cpu/amd.c
20165 +++ b/arch/x86/kernel/cpu/amd.c
20166 @@ -743,7 +743,7 @@ static void init_amd(struct cpuinfo_x86 *c)
20167 static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
20168 {
20169 /* AMD errata T13 (order #21922) */
20170 - if ((c->x86 == 6)) {
20171 + if (c->x86 == 6) {
20172 /* Duron Rev A0 */
20173 if (c->x86_model == 3 && c->x86_mask == 0)
20174 size = 64;
20175 diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
20176 index 2793d1f..b4f313a 100644
20177 --- a/arch/x86/kernel/cpu/common.c
20178 +++ b/arch/x86/kernel/cpu/common.c
20179 @@ -88,60 +88,6 @@ static const struct cpu_dev default_cpu = {
20180
20181 static const struct cpu_dev *this_cpu = &default_cpu;
20182
20183 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
20184 -#ifdef CONFIG_X86_64
20185 - /*
20186 - * We need valid kernel segments for data and code in long mode too
20187 - * IRET will check the segment types kkeil 2000/10/28
20188 - * Also sysret mandates a special GDT layout
20189 - *
20190 - * TLS descriptors are currently at a different place compared to i386.
20191 - * Hopefully nobody expects them at a fixed place (Wine?)
20192 - */
20193 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
20194 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
20195 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
20196 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
20197 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
20198 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
20199 -#else
20200 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
20201 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20202 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
20203 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
20204 - /*
20205 - * Segments used for calling PnP BIOS have byte granularity.
20206 - * They code segments and data segments have fixed 64k limits,
20207 - * the transfer segment sizes are set at run time.
20208 - */
20209 - /* 32-bit code */
20210 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
20211 - /* 16-bit code */
20212 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
20213 - /* 16-bit data */
20214 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
20215 - /* 16-bit data */
20216 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
20217 - /* 16-bit data */
20218 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
20219 - /*
20220 - * The APM segments have byte granularity and their bases
20221 - * are set at run time. All have 64k limits.
20222 - */
20223 - /* 32-bit code */
20224 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
20225 - /* 16-bit code */
20226 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
20227 - /* data */
20228 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
20229 -
20230 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20231 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
20232 - GDT_STACK_CANARY_INIT
20233 -#endif
20234 -} };
20235 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
20236 -
20237 static int __init x86_xsave_setup(char *s)
20238 {
20239 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
20240 @@ -288,6 +234,59 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
20241 set_in_cr4(X86_CR4_SMAP);
20242 }
20243
20244 +#ifdef CONFIG_X86_64
20245 +static __init int setup_disable_pcid(char *arg)
20246 +{
20247 + setup_clear_cpu_cap(X86_FEATURE_PCID);
20248 + setup_clear_cpu_cap(X86_FEATURE_INVPCID);
20249 +
20250 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20251 + if (clone_pgd_mask != ~(pgdval_t)0UL)
20252 + pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
20253 +#endif
20254 +
20255 + return 1;
20256 +}
20257 +__setup("nopcid", setup_disable_pcid);
20258 +
20259 +static void setup_pcid(struct cpuinfo_x86 *c)
20260 +{
20261 + if (!cpu_has(c, X86_FEATURE_PCID)) {
20262 + clear_cpu_cap(c, X86_FEATURE_INVPCID);
20263 +
20264 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20265 + if (clone_pgd_mask != ~(pgdval_t)0UL) {
20266 + pax_open_kernel();
20267 + pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
20268 + pax_close_kernel();
20269 + printk("PAX: slow and weak UDEREF enabled\n");
20270 + } else
20271 + printk("PAX: UDEREF disabled\n");
20272 +#endif
20273 +
20274 + return;
20275 + }
20276 +
20277 + printk("PAX: PCID detected\n");
20278 + set_in_cr4(X86_CR4_PCIDE);
20279 +
20280 +#ifdef CONFIG_PAX_MEMORY_UDEREF
20281 + pax_open_kernel();
20282 + clone_pgd_mask = ~(pgdval_t)0UL;
20283 + pax_close_kernel();
20284 + if (pax_user_shadow_base)
20285 + printk("PAX: weak UDEREF enabled\n");
20286 + else {
20287 + set_cpu_cap(c, X86_FEATURE_STRONGUDEREF);
20288 + printk("PAX: strong UDEREF enabled\n");
20289 + }
20290 +#endif
20291 +
20292 + if (cpu_has(c, X86_FEATURE_INVPCID))
20293 + printk("PAX: INVPCID detected\n");
20294 +}
20295 +#endif
20296 +
20297 /*
20298 * Some CPU features depend on higher CPUID levels, which may not always
20299 * be available due to CPUID level capping or broken virtualization
20300 @@ -386,7 +385,7 @@ void switch_to_new_gdt(int cpu)
20301 {
20302 struct desc_ptr gdt_descr;
20303
20304 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
20305 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
20306 gdt_descr.size = GDT_SIZE - 1;
20307 load_gdt(&gdt_descr);
20308 /* Reload the per-cpu base */
20309 @@ -875,6 +874,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
20310 setup_smep(c);
20311 setup_smap(c);
20312
20313 +#ifdef CONFIG_X86_64
20314 + setup_pcid(c);
20315 +#endif
20316 +
20317 /*
20318 * The vendor-specific functions might have changed features.
20319 * Now we do "generic changes."
20320 @@ -883,6 +886,10 @@ static void identify_cpu(struct cpuinfo_x86 *c)
20321 /* Filter out anything that depends on CPUID levels we don't have */
20322 filter_cpuid_features(c, true);
20323
20324 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
20325 + setup_clear_cpu_cap(X86_FEATURE_SEP);
20326 +#endif
20327 +
20328 /* If the model name is still unset, do table lookup. */
20329 if (!c->x86_model_id[0]) {
20330 const char *p;
20331 @@ -1070,10 +1077,12 @@ static __init int setup_disablecpuid(char *arg)
20332 }
20333 __setup("clearcpuid=", setup_disablecpuid);
20334
20335 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
20336 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
20337 +
20338 #ifdef CONFIG_X86_64
20339 -struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
20340 -struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
20341 - (unsigned long) debug_idt_table };
20342 +struct desc_ptr idt_descr __read_only = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
20343 +const struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) debug_idt_table };
20344
20345 DEFINE_PER_CPU_FIRST(union irq_stack_union,
20346 irq_stack_union) __aligned(PAGE_SIZE) __visible;
20347 @@ -1087,7 +1096,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
20348 EXPORT_PER_CPU_SYMBOL(current_task);
20349
20350 DEFINE_PER_CPU(unsigned long, kernel_stack) =
20351 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
20352 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
20353 EXPORT_PER_CPU_SYMBOL(kernel_stack);
20354
20355 DEFINE_PER_CPU(char *, irq_stack_ptr) =
20356 @@ -1232,7 +1241,7 @@ void cpu_init(void)
20357 load_ucode_ap();
20358
20359 cpu = stack_smp_processor_id();
20360 - t = &per_cpu(init_tss, cpu);
20361 + t = init_tss + cpu;
20362 oist = &per_cpu(orig_ist, cpu);
20363
20364 #ifdef CONFIG_NUMA
20365 @@ -1267,7 +1276,6 @@ void cpu_init(void)
20366 wrmsrl(MSR_KERNEL_GS_BASE, 0);
20367 barrier();
20368
20369 - x86_configure_nx();
20370 enable_x2apic();
20371
20372 /*
20373 @@ -1319,7 +1327,7 @@ void cpu_init(void)
20374 {
20375 int cpu = smp_processor_id();
20376 struct task_struct *curr = current;
20377 - struct tss_struct *t = &per_cpu(init_tss, cpu);
20378 + struct tss_struct *t = init_tss + cpu;
20379 struct thread_struct *thread = &curr->thread;
20380
20381 show_ucode_info_early();
20382 diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
20383 index 1414c90..1159406 100644
20384 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c
20385 +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
20386 @@ -1014,6 +1014,22 @@ static struct attribute *default_attrs[] = {
20387 };
20388
20389 #ifdef CONFIG_AMD_NB
20390 +static struct attribute *default_attrs_amd_nb[] = {
20391 + &type.attr,
20392 + &level.attr,
20393 + &coherency_line_size.attr,
20394 + &physical_line_partition.attr,
20395 + &ways_of_associativity.attr,
20396 + &number_of_sets.attr,
20397 + &size.attr,
20398 + &shared_cpu_map.attr,
20399 + &shared_cpu_list.attr,
20400 + NULL,
20401 + NULL,
20402 + NULL,
20403 + NULL
20404 +};
20405 +
20406 static struct attribute **amd_l3_attrs(void)
20407 {
20408 static struct attribute **attrs;
20409 @@ -1024,18 +1040,7 @@ static struct attribute **amd_l3_attrs(void)
20410
20411 n = ARRAY_SIZE(default_attrs);
20412
20413 - if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
20414 - n += 2;
20415 -
20416 - if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
20417 - n += 1;
20418 -
20419 - attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
20420 - if (attrs == NULL)
20421 - return attrs = default_attrs;
20422 -
20423 - for (n = 0; default_attrs[n]; n++)
20424 - attrs[n] = default_attrs[n];
20425 + attrs = default_attrs_amd_nb;
20426
20427 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
20428 attrs[n++] = &cache_disable_0.attr;
20429 @@ -1086,6 +1091,13 @@ static struct kobj_type ktype_cache = {
20430 .default_attrs = default_attrs,
20431 };
20432
20433 +#ifdef CONFIG_AMD_NB
20434 +static struct kobj_type ktype_cache_amd_nb = {
20435 + .sysfs_ops = &sysfs_ops,
20436 + .default_attrs = default_attrs_amd_nb,
20437 +};
20438 +#endif
20439 +
20440 static struct kobj_type ktype_percpu_entry = {
20441 .sysfs_ops = &sysfs_ops,
20442 };
20443 @@ -1151,20 +1163,26 @@ static int cache_add_dev(struct device *dev)
20444 return retval;
20445 }
20446
20447 +#ifdef CONFIG_AMD_NB
20448 + amd_l3_attrs();
20449 +#endif
20450 +
20451 for (i = 0; i < num_cache_leaves; i++) {
20452 + struct kobj_type *ktype;
20453 +
20454 this_object = INDEX_KOBJECT_PTR(cpu, i);
20455 this_object->cpu = cpu;
20456 this_object->index = i;
20457
20458 this_leaf = CPUID4_INFO_IDX(cpu, i);
20459
20460 - ktype_cache.default_attrs = default_attrs;
20461 + ktype = &ktype_cache;
20462 #ifdef CONFIG_AMD_NB
20463 if (this_leaf->base.nb)
20464 - ktype_cache.default_attrs = amd_l3_attrs();
20465 + ktype = &ktype_cache_amd_nb;
20466 #endif
20467 retval = kobject_init_and_add(&(this_object->kobj),
20468 - &ktype_cache,
20469 + ktype,
20470 per_cpu(ici_cache_kobject, cpu),
20471 "index%1lu", i);
20472 if (unlikely(retval)) {
20473 diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
20474 index b3218cd..99a75de 100644
20475 --- a/arch/x86/kernel/cpu/mcheck/mce.c
20476 +++ b/arch/x86/kernel/cpu/mcheck/mce.c
20477 @@ -45,6 +45,7 @@
20478 #include <asm/processor.h>
20479 #include <asm/mce.h>
20480 #include <asm/msr.h>
20481 +#include <asm/local.h>
20482
20483 #include "mce-internal.h"
20484
20485 @@ -258,7 +259,7 @@ static void print_mce(struct mce *m)
20486 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
20487 m->cs, m->ip);
20488
20489 - if (m->cs == __KERNEL_CS)
20490 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
20491 print_symbol("{%s}", m->ip);
20492 pr_cont("\n");
20493 }
20494 @@ -291,10 +292,10 @@ static void print_mce(struct mce *m)
20495
20496 #define PANIC_TIMEOUT 5 /* 5 seconds */
20497
20498 -static atomic_t mce_paniced;
20499 +static atomic_unchecked_t mce_paniced;
20500
20501 static int fake_panic;
20502 -static atomic_t mce_fake_paniced;
20503 +static atomic_unchecked_t mce_fake_paniced;
20504
20505 /* Panic in progress. Enable interrupts and wait for final IPI */
20506 static void wait_for_panic(void)
20507 @@ -318,7 +319,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
20508 /*
20509 * Make sure only one CPU runs in machine check panic
20510 */
20511 - if (atomic_inc_return(&mce_paniced) > 1)
20512 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
20513 wait_for_panic();
20514 barrier();
20515
20516 @@ -326,7 +327,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
20517 console_verbose();
20518 } else {
20519 /* Don't log too much for fake panic */
20520 - if (atomic_inc_return(&mce_fake_paniced) > 1)
20521 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
20522 return;
20523 }
20524 /* First print corrected ones that are still unlogged */
20525 @@ -365,7 +366,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
20526 if (!fake_panic) {
20527 if (panic_timeout == 0)
20528 panic_timeout = mca_cfg.panic_timeout;
20529 - panic(msg);
20530 + panic("%s", msg);
20531 } else
20532 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
20533 }
20534 @@ -695,7 +696,7 @@ static int mce_timed_out(u64 *t)
20535 * might have been modified by someone else.
20536 */
20537 rmb();
20538 - if (atomic_read(&mce_paniced))
20539 + if (atomic_read_unchecked(&mce_paniced))
20540 wait_for_panic();
20541 if (!mca_cfg.monarch_timeout)
20542 goto out;
20543 @@ -1666,7 +1667,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
20544 }
20545
20546 /* Call the installed machine check handler for this CPU setup. */
20547 -void (*machine_check_vector)(struct pt_regs *, long error_code) =
20548 +void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
20549 unexpected_machine_check;
20550
20551 /*
20552 @@ -1689,7 +1690,9 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
20553 return;
20554 }
20555
20556 + pax_open_kernel();
20557 machine_check_vector = do_machine_check;
20558 + pax_close_kernel();
20559
20560 __mcheck_cpu_init_generic();
20561 __mcheck_cpu_init_vendor(c);
20562 @@ -1703,7 +1706,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
20563 */
20564
20565 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
20566 -static int mce_chrdev_open_count; /* #times opened */
20567 +static local_t mce_chrdev_open_count; /* #times opened */
20568 static int mce_chrdev_open_exclu; /* already open exclusive? */
20569
20570 static int mce_chrdev_open(struct inode *inode, struct file *file)
20571 @@ -1711,7 +1714,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
20572 spin_lock(&mce_chrdev_state_lock);
20573
20574 if (mce_chrdev_open_exclu ||
20575 - (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
20576 + (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
20577 spin_unlock(&mce_chrdev_state_lock);
20578
20579 return -EBUSY;
20580 @@ -1719,7 +1722,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
20581
20582 if (file->f_flags & O_EXCL)
20583 mce_chrdev_open_exclu = 1;
20584 - mce_chrdev_open_count++;
20585 + local_inc(&mce_chrdev_open_count);
20586
20587 spin_unlock(&mce_chrdev_state_lock);
20588
20589 @@ -1730,7 +1733,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
20590 {
20591 spin_lock(&mce_chrdev_state_lock);
20592
20593 - mce_chrdev_open_count--;
20594 + local_dec(&mce_chrdev_open_count);
20595 mce_chrdev_open_exclu = 0;
20596
20597 spin_unlock(&mce_chrdev_state_lock);
20598 @@ -2404,7 +2407,7 @@ static __init void mce_init_banks(void)
20599
20600 for (i = 0; i < mca_cfg.banks; i++) {
20601 struct mce_bank *b = &mce_banks[i];
20602 - struct device_attribute *a = &b->attr;
20603 + device_attribute_no_const *a = &b->attr;
20604
20605 sysfs_attr_init(&a->attr);
20606 a->attr.name = b->attrname;
20607 @@ -2472,7 +2475,7 @@ struct dentry *mce_get_debugfs_dir(void)
20608 static void mce_reset(void)
20609 {
20610 cpu_missing = 0;
20611 - atomic_set(&mce_fake_paniced, 0);
20612 + atomic_set_unchecked(&mce_fake_paniced, 0);
20613 atomic_set(&mce_executing, 0);
20614 atomic_set(&mce_callin, 0);
20615 atomic_set(&global_nwo, 0);
20616 diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
20617 index 1c044b1..37a2a43 100644
20618 --- a/arch/x86/kernel/cpu/mcheck/p5.c
20619 +++ b/arch/x86/kernel/cpu/mcheck/p5.c
20620 @@ -11,6 +11,7 @@
20621 #include <asm/processor.h>
20622 #include <asm/mce.h>
20623 #include <asm/msr.h>
20624 +#include <asm/pgtable.h>
20625
20626 /* By default disabled */
20627 int mce_p5_enabled __read_mostly;
20628 @@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
20629 if (!cpu_has(c, X86_FEATURE_MCE))
20630 return;
20631
20632 + pax_open_kernel();
20633 machine_check_vector = pentium_machine_check;
20634 + pax_close_kernel();
20635 /* Make sure the vector pointer is visible before we enable MCEs: */
20636 wmb();
20637
20638 diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
20639 index e9a701a..35317d6 100644
20640 --- a/arch/x86/kernel/cpu/mcheck/winchip.c
20641 +++ b/arch/x86/kernel/cpu/mcheck/winchip.c
20642 @@ -10,6 +10,7 @@
20643 #include <asm/processor.h>
20644 #include <asm/mce.h>
20645 #include <asm/msr.h>
20646 +#include <asm/pgtable.h>
20647
20648 /* Machine check handler for WinChip C6: */
20649 static void winchip_machine_check(struct pt_regs *regs, long error_code)
20650 @@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
20651 {
20652 u32 lo, hi;
20653
20654 + pax_open_kernel();
20655 machine_check_vector = winchip_machine_check;
20656 + pax_close_kernel();
20657 /* Make sure the vector pointer is visible before we enable MCEs: */
20658 wmb();
20659
20660 diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
20661 index f961de9..8a9d332 100644
20662 --- a/arch/x86/kernel/cpu/mtrr/main.c
20663 +++ b/arch/x86/kernel/cpu/mtrr/main.c
20664 @@ -66,7 +66,7 @@ static DEFINE_MUTEX(mtrr_mutex);
20665 u64 size_or_mask, size_and_mask;
20666 static bool mtrr_aps_delayed_init;
20667
20668 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
20669 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
20670
20671 const struct mtrr_ops *mtrr_if;
20672
20673 diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
20674 index df5e41f..816c719 100644
20675 --- a/arch/x86/kernel/cpu/mtrr/mtrr.h
20676 +++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
20677 @@ -25,7 +25,7 @@ struct mtrr_ops {
20678 int (*validate_add_page)(unsigned long base, unsigned long size,
20679 unsigned int type);
20680 int (*have_wrcomb)(void);
20681 -};
20682 +} __do_const;
20683
20684 extern int generic_get_free_region(unsigned long base, unsigned long size,
20685 int replace_reg);
20686 diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
20687 index 8a87a32..682a22a 100644
20688 --- a/arch/x86/kernel/cpu/perf_event.c
20689 +++ b/arch/x86/kernel/cpu/perf_event.c
20690 @@ -1348,7 +1348,7 @@ static void __init pmu_check_apic(void)
20691 pr_info("no hardware sampling interrupt available.\n");
20692 }
20693
20694 -static struct attribute_group x86_pmu_format_group = {
20695 +static attribute_group_no_const x86_pmu_format_group = {
20696 .name = "format",
20697 .attrs = NULL,
20698 };
20699 @@ -1447,7 +1447,7 @@ static struct attribute *events_attr[] = {
20700 NULL,
20701 };
20702
20703 -static struct attribute_group x86_pmu_events_group = {
20704 +static attribute_group_no_const x86_pmu_events_group = {
20705 .name = "events",
20706 .attrs = events_attr,
20707 };
20708 @@ -1958,7 +1958,7 @@ static unsigned long get_segment_base(unsigned int segment)
20709 if (idx > GDT_ENTRIES)
20710 return 0;
20711
20712 - desc = __this_cpu_ptr(&gdt_page.gdt[0]);
20713 + desc = get_cpu_gdt_table(smp_processor_id());
20714 }
20715
20716 return get_desc_base(desc + idx);
20717 @@ -2048,7 +2048,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
20718 break;
20719
20720 perf_callchain_store(entry, frame.return_address);
20721 - fp = frame.next_frame;
20722 + fp = (const void __force_user *)frame.next_frame;
20723 }
20724 }
20725
20726 diff --git a/arch/x86/kernel/cpu/perf_event_amd_iommu.c b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
20727 index 639d128..e92d7e5 100644
20728 --- a/arch/x86/kernel/cpu/perf_event_amd_iommu.c
20729 +++ b/arch/x86/kernel/cpu/perf_event_amd_iommu.c
20730 @@ -405,7 +405,7 @@ static void perf_iommu_del(struct perf_event *event, int flags)
20731 static __init int _init_events_attrs(struct perf_amd_iommu *perf_iommu)
20732 {
20733 struct attribute **attrs;
20734 - struct attribute_group *attr_group;
20735 + attribute_group_no_const *attr_group;
20736 int i = 0, j;
20737
20738 while (amd_iommu_v2_event_descs[i].attr.attr.name)
20739 diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
20740 index f31a165..7b46cd8 100644
20741 --- a/arch/x86/kernel/cpu/perf_event_intel.c
20742 +++ b/arch/x86/kernel/cpu/perf_event_intel.c
20743 @@ -2247,10 +2247,10 @@ __init int intel_pmu_init(void)
20744 * v2 and above have a perf capabilities MSR
20745 */
20746 if (version > 1) {
20747 - u64 capabilities;
20748 + u64 capabilities = x86_pmu.intel_cap.capabilities;
20749
20750 - rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
20751 - x86_pmu.intel_cap.capabilities = capabilities;
20752 + if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
20753 + x86_pmu.intel_cap.capabilities = capabilities;
20754 }
20755
20756 intel_ds_init();
20757 diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
20758 index 4118f9f..f91d0ab 100644
20759 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
20760 +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
20761 @@ -3204,7 +3204,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
20762 static int __init uncore_type_init(struct intel_uncore_type *type)
20763 {
20764 struct intel_uncore_pmu *pmus;
20765 - struct attribute_group *attr_group;
20766 + attribute_group_no_const *attr_group;
20767 struct attribute **attrs;
20768 int i, j;
20769
20770 diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
20771 index a80ab71..4089da5 100644
20772 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
20773 +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
20774 @@ -498,7 +498,7 @@ struct intel_uncore_box {
20775 struct uncore_event_desc {
20776 struct kobj_attribute attr;
20777 const char *config;
20778 -};
20779 +} __do_const;
20780
20781 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
20782 { \
20783 diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
20784 index 7d9481c..99c7e4b 100644
20785 --- a/arch/x86/kernel/cpuid.c
20786 +++ b/arch/x86/kernel/cpuid.c
20787 @@ -170,7 +170,7 @@ static int cpuid_class_cpu_callback(struct notifier_block *nfb,
20788 return notifier_from_errno(err);
20789 }
20790
20791 -static struct notifier_block __refdata cpuid_class_cpu_notifier =
20792 +static struct notifier_block cpuid_class_cpu_notifier =
20793 {
20794 .notifier_call = cpuid_class_cpu_callback,
20795 };
20796 diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
20797 index 18677a9..f67c45b 100644
20798 --- a/arch/x86/kernel/crash.c
20799 +++ b/arch/x86/kernel/crash.c
20800 @@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
20801 {
20802 #ifdef CONFIG_X86_32
20803 struct pt_regs fixed_regs;
20804 -#endif
20805
20806 -#ifdef CONFIG_X86_32
20807 - if (!user_mode_vm(regs)) {
20808 + if (!user_mode(regs)) {
20809 crash_fixup_ss_esp(&fixed_regs, regs);
20810 regs = &fixed_regs;
20811 }
20812 diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
20813 index afa64ad..dce67dd 100644
20814 --- a/arch/x86/kernel/crash_dump_64.c
20815 +++ b/arch/x86/kernel/crash_dump_64.c
20816 @@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
20817 return -ENOMEM;
20818
20819 if (userbuf) {
20820 - if (copy_to_user(buf, vaddr + offset, csize)) {
20821 + if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
20822 iounmap(vaddr);
20823 return -EFAULT;
20824 }
20825 diff --git a/arch/x86/kernel/doublefault.c b/arch/x86/kernel/doublefault.c
20826 index 5d3fe8d..02e1429 100644
20827 --- a/arch/x86/kernel/doublefault.c
20828 +++ b/arch/x86/kernel/doublefault.c
20829 @@ -13,7 +13,7 @@
20830
20831 #define DOUBLEFAULT_STACKSIZE (1024)
20832 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
20833 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
20834 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
20835
20836 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
20837
20838 @@ -23,7 +23,7 @@ static void doublefault_fn(void)
20839 unsigned long gdt, tss;
20840
20841 native_store_gdt(&gdt_desc);
20842 - gdt = gdt_desc.address;
20843 + gdt = (unsigned long)gdt_desc.address;
20844
20845 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
20846
20847 @@ -60,10 +60,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
20848 /* 0x2 bit is always set */
20849 .flags = X86_EFLAGS_SF | 0x2,
20850 .sp = STACK_START,
20851 - .es = __USER_DS,
20852 + .es = __KERNEL_DS,
20853 .cs = __KERNEL_CS,
20854 .ss = __KERNEL_DS,
20855 - .ds = __USER_DS,
20856 + .ds = __KERNEL_DS,
20857 .fs = __KERNEL_PERCPU,
20858
20859 .__cr3 = __pa_nodebug(swapper_pg_dir),
20860 diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
20861 index deb6421..76bbc12 100644
20862 --- a/arch/x86/kernel/dumpstack.c
20863 +++ b/arch/x86/kernel/dumpstack.c
20864 @@ -2,6 +2,9 @@
20865 * Copyright (C) 1991, 1992 Linus Torvalds
20866 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
20867 */
20868 +#ifdef CONFIG_GRKERNSEC_HIDESYM
20869 +#define __INCLUDED_BY_HIDESYM 1
20870 +#endif
20871 #include <linux/kallsyms.h>
20872 #include <linux/kprobes.h>
20873 #include <linux/uaccess.h>
20874 @@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
20875 static void
20876 print_ftrace_graph_addr(unsigned long addr, void *data,
20877 const struct stacktrace_ops *ops,
20878 - struct thread_info *tinfo, int *graph)
20879 + struct task_struct *task, int *graph)
20880 {
20881 - struct task_struct *task;
20882 unsigned long ret_addr;
20883 int index;
20884
20885 if (addr != (unsigned long)return_to_handler)
20886 return;
20887
20888 - task = tinfo->task;
20889 index = task->curr_ret_stack;
20890
20891 if (!task->ret_stack || index < *graph)
20892 @@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
20893 static inline void
20894 print_ftrace_graph_addr(unsigned long addr, void *data,
20895 const struct stacktrace_ops *ops,
20896 - struct thread_info *tinfo, int *graph)
20897 + struct task_struct *task, int *graph)
20898 { }
20899 #endif
20900
20901 @@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
20902 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
20903 */
20904
20905 -static inline int valid_stack_ptr(struct thread_info *tinfo,
20906 - void *p, unsigned int size, void *end)
20907 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
20908 {
20909 - void *t = tinfo;
20910 if (end) {
20911 if (p < end && p >= (end-THREAD_SIZE))
20912 return 1;
20913 @@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
20914 }
20915
20916 unsigned long
20917 -print_context_stack(struct thread_info *tinfo,
20918 +print_context_stack(struct task_struct *task, void *stack_start,
20919 unsigned long *stack, unsigned long bp,
20920 const struct stacktrace_ops *ops, void *data,
20921 unsigned long *end, int *graph)
20922 {
20923 struct stack_frame *frame = (struct stack_frame *)bp;
20924
20925 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
20926 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
20927 unsigned long addr;
20928
20929 addr = *stack;
20930 @@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
20931 } else {
20932 ops->address(data, addr, 0);
20933 }
20934 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
20935 + print_ftrace_graph_addr(addr, data, ops, task, graph);
20936 }
20937 stack++;
20938 }
20939 @@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
20940 EXPORT_SYMBOL_GPL(print_context_stack);
20941
20942 unsigned long
20943 -print_context_stack_bp(struct thread_info *tinfo,
20944 +print_context_stack_bp(struct task_struct *task, void *stack_start,
20945 unsigned long *stack, unsigned long bp,
20946 const struct stacktrace_ops *ops, void *data,
20947 unsigned long *end, int *graph)
20948 @@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
20949 struct stack_frame *frame = (struct stack_frame *)bp;
20950 unsigned long *ret_addr = &frame->return_address;
20951
20952 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
20953 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
20954 unsigned long addr = *ret_addr;
20955
20956 if (!__kernel_text_address(addr))
20957 @@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
20958 ops->address(data, addr, 1);
20959 frame = frame->next_frame;
20960 ret_addr = &frame->return_address;
20961 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
20962 + print_ftrace_graph_addr(addr, data, ops, task, graph);
20963 }
20964
20965 return (unsigned long)frame;
20966 @@ -150,7 +149,7 @@ static int print_trace_stack(void *data, char *name)
20967 static void print_trace_address(void *data, unsigned long addr, int reliable)
20968 {
20969 touch_nmi_watchdog();
20970 - printk(data);
20971 + printk("%s", (char *)data);
20972 printk_address(addr, reliable);
20973 }
20974
20975 @@ -219,6 +218,8 @@ unsigned __kprobes long oops_begin(void)
20976 }
20977 EXPORT_SYMBOL_GPL(oops_begin);
20978
20979 +extern void gr_handle_kernel_exploit(void);
20980 +
20981 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
20982 {
20983 if (regs && kexec_should_crash(current))
20984 @@ -240,7 +241,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
20985 panic("Fatal exception in interrupt");
20986 if (panic_on_oops)
20987 panic("Fatal exception");
20988 - do_exit(signr);
20989 +
20990 + gr_handle_kernel_exploit();
20991 +
20992 + do_group_exit(signr);
20993 }
20994
20995 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
20996 @@ -268,7 +272,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
20997 print_modules();
20998 show_regs(regs);
20999 #ifdef CONFIG_X86_32
21000 - if (user_mode_vm(regs)) {
21001 + if (user_mode(regs)) {
21002 sp = regs->sp;
21003 ss = regs->ss & 0xffff;
21004 } else {
21005 @@ -296,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
21006 unsigned long flags = oops_begin();
21007 int sig = SIGSEGV;
21008
21009 - if (!user_mode_vm(regs))
21010 + if (!user_mode(regs))
21011 report_bug(regs->ip, regs);
21012
21013 if (__die(str, regs, err))
21014 diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
21015 index f2a1770..540657f 100644
21016 --- a/arch/x86/kernel/dumpstack_32.c
21017 +++ b/arch/x86/kernel/dumpstack_32.c
21018 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21019 bp = stack_frame(task, regs);
21020
21021 for (;;) {
21022 - struct thread_info *context;
21023 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
21024
21025 - context = (struct thread_info *)
21026 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
21027 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
21028 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
21029
21030 - stack = (unsigned long *)context->previous_esp;
21031 - if (!stack)
21032 + if (stack_start == task_stack_page(task))
21033 break;
21034 + stack = *(unsigned long **)stack_start;
21035 if (ops->stack(data, "IRQ") < 0)
21036 break;
21037 touch_nmi_watchdog();
21038 @@ -87,27 +85,28 @@ void show_regs(struct pt_regs *regs)
21039 int i;
21040
21041 show_regs_print_info(KERN_EMERG);
21042 - __show_regs(regs, !user_mode_vm(regs));
21043 + __show_regs(regs, !user_mode(regs));
21044
21045 /*
21046 * When in-kernel, we also print out the stack and code at the
21047 * time of the fault..
21048 */
21049 - if (!user_mode_vm(regs)) {
21050 + if (!user_mode(regs)) {
21051 unsigned int code_prologue = code_bytes * 43 / 64;
21052 unsigned int code_len = code_bytes;
21053 unsigned char c;
21054 u8 *ip;
21055 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
21056
21057 pr_emerg("Stack:\n");
21058 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
21059
21060 pr_emerg("Code:");
21061
21062 - ip = (u8 *)regs->ip - code_prologue;
21063 + ip = (u8 *)regs->ip - code_prologue + cs_base;
21064 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
21065 /* try starting at IP */
21066 - ip = (u8 *)regs->ip;
21067 + ip = (u8 *)regs->ip + cs_base;
21068 code_len = code_len - code_prologue + 1;
21069 }
21070 for (i = 0; i < code_len; i++, ip++) {
21071 @@ -116,7 +115,7 @@ void show_regs(struct pt_regs *regs)
21072 pr_cont(" Bad EIP value.");
21073 break;
21074 }
21075 - if (ip == (u8 *)regs->ip)
21076 + if (ip == (u8 *)regs->ip + cs_base)
21077 pr_cont(" <%02x>", c);
21078 else
21079 pr_cont(" %02x", c);
21080 @@ -129,6 +128,7 @@ int is_valid_bugaddr(unsigned long ip)
21081 {
21082 unsigned short ud2;
21083
21084 + ip = ktla_ktva(ip);
21085 if (ip < PAGE_OFFSET)
21086 return 0;
21087 if (probe_kernel_address((unsigned short *)ip, ud2))
21088 @@ -136,3 +136,15 @@ int is_valid_bugaddr(unsigned long ip)
21089
21090 return ud2 == 0x0b0f;
21091 }
21092 +
21093 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21094 +void pax_check_alloca(unsigned long size)
21095 +{
21096 + unsigned long sp = (unsigned long)&sp, stack_left;
21097 +
21098 + /* all kernel stacks are of the same size */
21099 + stack_left = sp & (THREAD_SIZE - 1);
21100 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
21101 +}
21102 +EXPORT_SYMBOL(pax_check_alloca);
21103 +#endif
21104 diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
21105 index addb207..99635fa 100644
21106 --- a/arch/x86/kernel/dumpstack_64.c
21107 +++ b/arch/x86/kernel/dumpstack_64.c
21108 @@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21109 unsigned long *irq_stack_end =
21110 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
21111 unsigned used = 0;
21112 - struct thread_info *tinfo;
21113 int graph = 0;
21114 unsigned long dummy;
21115 + void *stack_start;
21116
21117 if (!task)
21118 task = current;
21119 @@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21120 * current stack address. If the stacks consist of nested
21121 * exceptions
21122 */
21123 - tinfo = task_thread_info(task);
21124 for (;;) {
21125 char *id;
21126 unsigned long *estack_end;
21127 +
21128 estack_end = in_exception_stack(cpu, (unsigned long)stack,
21129 &used, &id);
21130
21131 @@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21132 if (ops->stack(data, id) < 0)
21133 break;
21134
21135 - bp = ops->walk_stack(tinfo, stack, bp, ops,
21136 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
21137 data, estack_end, &graph);
21138 ops->stack(data, "<EOE>");
21139 /*
21140 @@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21141 * second-to-last pointer (index -2 to end) in the
21142 * exception stack:
21143 */
21144 + if ((u16)estack_end[-1] != __KERNEL_DS)
21145 + goto out;
21146 stack = (unsigned long *) estack_end[-2];
21147 continue;
21148 }
21149 @@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21150 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
21151 if (ops->stack(data, "IRQ") < 0)
21152 break;
21153 - bp = ops->walk_stack(tinfo, stack, bp,
21154 + bp = ops->walk_stack(task, irq_stack, stack, bp,
21155 ops, data, irq_stack_end, &graph);
21156 /*
21157 * We link to the next stack (which would be
21158 @@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
21159 /*
21160 * This handles the process stack:
21161 */
21162 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
21163 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
21164 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
21165 +out:
21166 put_cpu();
21167 }
21168 EXPORT_SYMBOL(dump_trace);
21169 @@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip)
21170
21171 return ud2 == 0x0b0f;
21172 }
21173 +
21174 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21175 +void pax_check_alloca(unsigned long size)
21176 +{
21177 + unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
21178 + unsigned cpu, used;
21179 + char *id;
21180 +
21181 + /* check the process stack first */
21182 + stack_start = (unsigned long)task_stack_page(current);
21183 + stack_end = stack_start + THREAD_SIZE;
21184 + if (likely(stack_start <= sp && sp < stack_end)) {
21185 + unsigned long stack_left = sp & (THREAD_SIZE - 1);
21186 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
21187 + return;
21188 + }
21189 +
21190 + cpu = get_cpu();
21191 +
21192 + /* check the irq stacks */
21193 + stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
21194 + stack_start = stack_end - IRQ_STACK_SIZE;
21195 + if (stack_start <= sp && sp < stack_end) {
21196 + unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
21197 + put_cpu();
21198 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
21199 + return;
21200 + }
21201 +
21202 + /* check the exception stacks */
21203 + used = 0;
21204 + stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
21205 + stack_start = stack_end - EXCEPTION_STKSZ;
21206 + if (stack_end && stack_start <= sp && sp < stack_end) {
21207 + unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
21208 + put_cpu();
21209 + BUG_ON(stack_left < 256 || size >= stack_left - 256);
21210 + return;
21211 + }
21212 +
21213 + put_cpu();
21214 +
21215 + /* unknown stack */
21216 + BUG();
21217 +}
21218 +EXPORT_SYMBOL(pax_check_alloca);
21219 +#endif
21220 diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
21221 index 174da5f..5e55606 100644
21222 --- a/arch/x86/kernel/e820.c
21223 +++ b/arch/x86/kernel/e820.c
21224 @@ -803,8 +803,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
21225
21226 static void early_panic(char *msg)
21227 {
21228 - early_printk(msg);
21229 - panic(msg);
21230 + early_printk("%s", msg);
21231 + panic("%s", msg);
21232 }
21233
21234 static int userdef __initdata;
21235 diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
21236 index d15f575..d692043 100644
21237 --- a/arch/x86/kernel/early_printk.c
21238 +++ b/arch/x86/kernel/early_printk.c
21239 @@ -7,6 +7,7 @@
21240 #include <linux/pci_regs.h>
21241 #include <linux/pci_ids.h>
21242 #include <linux/errno.h>
21243 +#include <linux/sched.h>
21244 #include <asm/io.h>
21245 #include <asm/processor.h>
21246 #include <asm/fcntl.h>
21247 diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
21248 index f0dcb0c..9f39b80 100644
21249 --- a/arch/x86/kernel/entry_32.S
21250 +++ b/arch/x86/kernel/entry_32.S
21251 @@ -177,13 +177,153 @@
21252 /*CFI_REL_OFFSET gs, PT_GS*/
21253 .endm
21254 .macro SET_KERNEL_GS reg
21255 +
21256 +#ifdef CONFIG_CC_STACKPROTECTOR
21257 movl $(__KERNEL_STACK_CANARY), \reg
21258 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
21259 + movl $(__USER_DS), \reg
21260 +#else
21261 + xorl \reg, \reg
21262 +#endif
21263 +
21264 movl \reg, %gs
21265 .endm
21266
21267 #endif /* CONFIG_X86_32_LAZY_GS */
21268
21269 -.macro SAVE_ALL
21270 +.macro pax_enter_kernel
21271 +#ifdef CONFIG_PAX_KERNEXEC
21272 + call pax_enter_kernel
21273 +#endif
21274 +.endm
21275 +
21276 +.macro pax_exit_kernel
21277 +#ifdef CONFIG_PAX_KERNEXEC
21278 + call pax_exit_kernel
21279 +#endif
21280 +.endm
21281 +
21282 +#ifdef CONFIG_PAX_KERNEXEC
21283 +ENTRY(pax_enter_kernel)
21284 +#ifdef CONFIG_PARAVIRT
21285 + pushl %eax
21286 + pushl %ecx
21287 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
21288 + mov %eax, %esi
21289 +#else
21290 + mov %cr0, %esi
21291 +#endif
21292 + bts $16, %esi
21293 + jnc 1f
21294 + mov %cs, %esi
21295 + cmp $__KERNEL_CS, %esi
21296 + jz 3f
21297 + ljmp $__KERNEL_CS, $3f
21298 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
21299 +2:
21300 +#ifdef CONFIG_PARAVIRT
21301 + mov %esi, %eax
21302 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
21303 +#else
21304 + mov %esi, %cr0
21305 +#endif
21306 +3:
21307 +#ifdef CONFIG_PARAVIRT
21308 + popl %ecx
21309 + popl %eax
21310 +#endif
21311 + ret
21312 +ENDPROC(pax_enter_kernel)
21313 +
21314 +ENTRY(pax_exit_kernel)
21315 +#ifdef CONFIG_PARAVIRT
21316 + pushl %eax
21317 + pushl %ecx
21318 +#endif
21319 + mov %cs, %esi
21320 + cmp $__KERNEXEC_KERNEL_CS, %esi
21321 + jnz 2f
21322 +#ifdef CONFIG_PARAVIRT
21323 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
21324 + mov %eax, %esi
21325 +#else
21326 + mov %cr0, %esi
21327 +#endif
21328 + btr $16, %esi
21329 + ljmp $__KERNEL_CS, $1f
21330 +1:
21331 +#ifdef CONFIG_PARAVIRT
21332 + mov %esi, %eax
21333 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
21334 +#else
21335 + mov %esi, %cr0
21336 +#endif
21337 +2:
21338 +#ifdef CONFIG_PARAVIRT
21339 + popl %ecx
21340 + popl %eax
21341 +#endif
21342 + ret
21343 +ENDPROC(pax_exit_kernel)
21344 +#endif
21345 +
21346 + .macro pax_erase_kstack
21347 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21348 + call pax_erase_kstack
21349 +#endif
21350 + .endm
21351 +
21352 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
21353 +/*
21354 + * ebp: thread_info
21355 + */
21356 +ENTRY(pax_erase_kstack)
21357 + pushl %edi
21358 + pushl %ecx
21359 + pushl %eax
21360 +
21361 + mov TI_lowest_stack(%ebp), %edi
21362 + mov $-0xBEEF, %eax
21363 + std
21364 +
21365 +1: mov %edi, %ecx
21366 + and $THREAD_SIZE_asm - 1, %ecx
21367 + shr $2, %ecx
21368 + repne scasl
21369 + jecxz 2f
21370 +
21371 + cmp $2*16, %ecx
21372 + jc 2f
21373 +
21374 + mov $2*16, %ecx
21375 + repe scasl
21376 + jecxz 2f
21377 + jne 1b
21378 +
21379 +2: cld
21380 + mov %esp, %ecx
21381 + sub %edi, %ecx
21382 +
21383 + cmp $THREAD_SIZE_asm, %ecx
21384 + jb 3f
21385 + ud2
21386 +3:
21387 +
21388 + shr $2, %ecx
21389 + rep stosl
21390 +
21391 + mov TI_task_thread_sp0(%ebp), %edi
21392 + sub $128, %edi
21393 + mov %edi, TI_lowest_stack(%ebp)
21394 +
21395 + popl %eax
21396 + popl %ecx
21397 + popl %edi
21398 + ret
21399 +ENDPROC(pax_erase_kstack)
21400 +#endif
21401 +
21402 +.macro __SAVE_ALL _DS
21403 cld
21404 PUSH_GS
21405 pushl_cfi %fs
21406 @@ -206,7 +346,7 @@
21407 CFI_REL_OFFSET ecx, 0
21408 pushl_cfi %ebx
21409 CFI_REL_OFFSET ebx, 0
21410 - movl $(__USER_DS), %edx
21411 + movl $\_DS, %edx
21412 movl %edx, %ds
21413 movl %edx, %es
21414 movl $(__KERNEL_PERCPU), %edx
21415 @@ -214,6 +354,15 @@
21416 SET_KERNEL_GS %edx
21417 .endm
21418
21419 +.macro SAVE_ALL
21420 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21421 + __SAVE_ALL __KERNEL_DS
21422 + pax_enter_kernel
21423 +#else
21424 + __SAVE_ALL __USER_DS
21425 +#endif
21426 +.endm
21427 +
21428 .macro RESTORE_INT_REGS
21429 popl_cfi %ebx
21430 CFI_RESTORE ebx
21431 @@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
21432 popfl_cfi
21433 jmp syscall_exit
21434 CFI_ENDPROC
21435 -END(ret_from_fork)
21436 +ENDPROC(ret_from_fork)
21437
21438 ENTRY(ret_from_kernel_thread)
21439 CFI_STARTPROC
21440 @@ -344,7 +493,15 @@ ret_from_intr:
21441 andl $SEGMENT_RPL_MASK, %eax
21442 #endif
21443 cmpl $USER_RPL, %eax
21444 +
21445 +#ifdef CONFIG_PAX_KERNEXEC
21446 + jae resume_userspace
21447 +
21448 + pax_exit_kernel
21449 + jmp resume_kernel
21450 +#else
21451 jb resume_kernel # not returning to v8086 or userspace
21452 +#endif
21453
21454 ENTRY(resume_userspace)
21455 LOCKDEP_SYS_EXIT
21456 @@ -356,8 +513,8 @@ ENTRY(resume_userspace)
21457 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
21458 # int/exception return?
21459 jne work_pending
21460 - jmp restore_all
21461 -END(ret_from_exception)
21462 + jmp restore_all_pax
21463 +ENDPROC(ret_from_exception)
21464
21465 #ifdef CONFIG_PREEMPT
21466 ENTRY(resume_kernel)
21467 @@ -372,7 +529,7 @@ need_resched:
21468 jz restore_all
21469 call preempt_schedule_irq
21470 jmp need_resched
21471 -END(resume_kernel)
21472 +ENDPROC(resume_kernel)
21473 #endif
21474 CFI_ENDPROC
21475 /*
21476 @@ -406,30 +563,45 @@ sysenter_past_esp:
21477 /*CFI_REL_OFFSET cs, 0*/
21478 /*
21479 * Push current_thread_info()->sysenter_return to the stack.
21480 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
21481 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
21482 */
21483 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
21484 + pushl_cfi $0
21485 CFI_REL_OFFSET eip, 0
21486
21487 pushl_cfi %eax
21488 SAVE_ALL
21489 + GET_THREAD_INFO(%ebp)
21490 + movl TI_sysenter_return(%ebp),%ebp
21491 + movl %ebp,PT_EIP(%esp)
21492 ENABLE_INTERRUPTS(CLBR_NONE)
21493
21494 /*
21495 * Load the potential sixth argument from user stack.
21496 * Careful about security.
21497 */
21498 + movl PT_OLDESP(%esp),%ebp
21499 +
21500 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21501 + mov PT_OLDSS(%esp),%ds
21502 +1: movl %ds:(%ebp),%ebp
21503 + push %ss
21504 + pop %ds
21505 +#else
21506 cmpl $__PAGE_OFFSET-3,%ebp
21507 jae syscall_fault
21508 ASM_STAC
21509 1: movl (%ebp),%ebp
21510 ASM_CLAC
21511 +#endif
21512 +
21513 movl %ebp,PT_EBP(%esp)
21514 _ASM_EXTABLE(1b,syscall_fault)
21515
21516 GET_THREAD_INFO(%ebp)
21517
21518 +#ifdef CONFIG_PAX_RANDKSTACK
21519 + pax_erase_kstack
21520 +#endif
21521 +
21522 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
21523 jnz sysenter_audit
21524 sysenter_do_call:
21525 @@ -444,12 +616,24 @@ sysenter_do_call:
21526 testl $_TIF_ALLWORK_MASK, %ecx
21527 jne sysexit_audit
21528 sysenter_exit:
21529 +
21530 +#ifdef CONFIG_PAX_RANDKSTACK
21531 + pushl_cfi %eax
21532 + movl %esp, %eax
21533 + call pax_randomize_kstack
21534 + popl_cfi %eax
21535 +#endif
21536 +
21537 + pax_erase_kstack
21538 +
21539 /* if something modifies registers it must also disable sysexit */
21540 movl PT_EIP(%esp), %edx
21541 movl PT_OLDESP(%esp), %ecx
21542 xorl %ebp,%ebp
21543 TRACE_IRQS_ON
21544 1: mov PT_FS(%esp), %fs
21545 +2: mov PT_DS(%esp), %ds
21546 +3: mov PT_ES(%esp), %es
21547 PTGS_TO_GS
21548 ENABLE_INTERRUPTS_SYSEXIT
21549
21550 @@ -466,6 +650,9 @@ sysenter_audit:
21551 movl %eax,%edx /* 2nd arg: syscall number */
21552 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
21553 call __audit_syscall_entry
21554 +
21555 + pax_erase_kstack
21556 +
21557 pushl_cfi %ebx
21558 movl PT_EAX(%esp),%eax /* reload syscall number */
21559 jmp sysenter_do_call
21560 @@ -491,10 +678,16 @@ sysexit_audit:
21561
21562 CFI_ENDPROC
21563 .pushsection .fixup,"ax"
21564 -2: movl $0,PT_FS(%esp)
21565 +4: movl $0,PT_FS(%esp)
21566 + jmp 1b
21567 +5: movl $0,PT_DS(%esp)
21568 + jmp 1b
21569 +6: movl $0,PT_ES(%esp)
21570 jmp 1b
21571 .popsection
21572 - _ASM_EXTABLE(1b,2b)
21573 + _ASM_EXTABLE(1b,4b)
21574 + _ASM_EXTABLE(2b,5b)
21575 + _ASM_EXTABLE(3b,6b)
21576 PTGS_TO_GS_EX
21577 ENDPROC(ia32_sysenter_target)
21578
21579 @@ -509,6 +702,11 @@ ENTRY(system_call)
21580 pushl_cfi %eax # save orig_eax
21581 SAVE_ALL
21582 GET_THREAD_INFO(%ebp)
21583 +
21584 +#ifdef CONFIG_PAX_RANDKSTACK
21585 + pax_erase_kstack
21586 +#endif
21587 +
21588 # system call tracing in operation / emulation
21589 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
21590 jnz syscall_trace_entry
21591 @@ -527,6 +725,15 @@ syscall_exit:
21592 testl $_TIF_ALLWORK_MASK, %ecx # current->work
21593 jne syscall_exit_work
21594
21595 +restore_all_pax:
21596 +
21597 +#ifdef CONFIG_PAX_RANDKSTACK
21598 + movl %esp, %eax
21599 + call pax_randomize_kstack
21600 +#endif
21601 +
21602 + pax_erase_kstack
21603 +
21604 restore_all:
21605 TRACE_IRQS_IRET
21606 restore_all_notrace:
21607 @@ -583,14 +790,34 @@ ldt_ss:
21608 * compensating for the offset by changing to the ESPFIX segment with
21609 * a base address that matches for the difference.
21610 */
21611 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
21612 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
21613 mov %esp, %edx /* load kernel esp */
21614 mov PT_OLDESP(%esp), %eax /* load userspace esp */
21615 mov %dx, %ax /* eax: new kernel esp */
21616 sub %eax, %edx /* offset (low word is 0) */
21617 +#ifdef CONFIG_SMP
21618 + movl PER_CPU_VAR(cpu_number), %ebx
21619 + shll $PAGE_SHIFT_asm, %ebx
21620 + addl $cpu_gdt_table, %ebx
21621 +#else
21622 + movl $cpu_gdt_table, %ebx
21623 +#endif
21624 shr $16, %edx
21625 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
21626 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
21627 +
21628 +#ifdef CONFIG_PAX_KERNEXEC
21629 + mov %cr0, %esi
21630 + btr $16, %esi
21631 + mov %esi, %cr0
21632 +#endif
21633 +
21634 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
21635 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
21636 +
21637 +#ifdef CONFIG_PAX_KERNEXEC
21638 + bts $16, %esi
21639 + mov %esi, %cr0
21640 +#endif
21641 +
21642 pushl_cfi $__ESPFIX_SS
21643 pushl_cfi %eax /* new kernel esp */
21644 /* Disable interrupts, but do not irqtrace this section: we
21645 @@ -619,20 +846,18 @@ work_resched:
21646 movl TI_flags(%ebp), %ecx
21647 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
21648 # than syscall tracing?
21649 - jz restore_all
21650 + jz restore_all_pax
21651 testb $_TIF_NEED_RESCHED, %cl
21652 jnz work_resched
21653
21654 work_notifysig: # deal with pending signals and
21655 # notify-resume requests
21656 + movl %esp, %eax
21657 #ifdef CONFIG_VM86
21658 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
21659 - movl %esp, %eax
21660 jne work_notifysig_v86 # returning to kernel-space or
21661 # vm86-space
21662 1:
21663 -#else
21664 - movl %esp, %eax
21665 #endif
21666 TRACE_IRQS_ON
21667 ENABLE_INTERRUPTS(CLBR_NONE)
21668 @@ -653,7 +878,7 @@ work_notifysig_v86:
21669 movl %eax, %esp
21670 jmp 1b
21671 #endif
21672 -END(work_pending)
21673 +ENDPROC(work_pending)
21674
21675 # perform syscall exit tracing
21676 ALIGN
21677 @@ -661,11 +886,14 @@ syscall_trace_entry:
21678 movl $-ENOSYS,PT_EAX(%esp)
21679 movl %esp, %eax
21680 call syscall_trace_enter
21681 +
21682 + pax_erase_kstack
21683 +
21684 /* What it returned is what we'll actually use. */
21685 cmpl $(NR_syscalls), %eax
21686 jnae syscall_call
21687 jmp syscall_exit
21688 -END(syscall_trace_entry)
21689 +ENDPROC(syscall_trace_entry)
21690
21691 # perform syscall exit tracing
21692 ALIGN
21693 @@ -678,21 +906,25 @@ syscall_exit_work:
21694 movl %esp, %eax
21695 call syscall_trace_leave
21696 jmp resume_userspace
21697 -END(syscall_exit_work)
21698 +ENDPROC(syscall_exit_work)
21699 CFI_ENDPROC
21700
21701 RING0_INT_FRAME # can't unwind into user space anyway
21702 syscall_fault:
21703 +#ifdef CONFIG_PAX_MEMORY_UDEREF
21704 + push %ss
21705 + pop %ds
21706 +#endif
21707 ASM_CLAC
21708 GET_THREAD_INFO(%ebp)
21709 movl $-EFAULT,PT_EAX(%esp)
21710 jmp resume_userspace
21711 -END(syscall_fault)
21712 +ENDPROC(syscall_fault)
21713
21714 syscall_badsys:
21715 movl $-ENOSYS,PT_EAX(%esp)
21716 jmp resume_userspace
21717 -END(syscall_badsys)
21718 +ENDPROC(syscall_badsys)
21719 CFI_ENDPROC
21720 /*
21721 * End of kprobes section
21722 @@ -708,8 +940,15 @@ END(syscall_badsys)
21723 * normal stack and adjusts ESP with the matching offset.
21724 */
21725 /* fixup the stack */
21726 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
21727 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
21728 +#ifdef CONFIG_SMP
21729 + movl PER_CPU_VAR(cpu_number), %ebx
21730 + shll $PAGE_SHIFT_asm, %ebx
21731 + addl $cpu_gdt_table, %ebx
21732 +#else
21733 + movl $cpu_gdt_table, %ebx
21734 +#endif
21735 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
21736 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
21737 shl $16, %eax
21738 addl %esp, %eax /* the adjusted stack pointer */
21739 pushl_cfi $__KERNEL_DS
21740 @@ -762,7 +1001,7 @@ vector=vector+1
21741 .endr
21742 2: jmp common_interrupt
21743 .endr
21744 -END(irq_entries_start)
21745 +ENDPROC(irq_entries_start)
21746
21747 .previous
21748 END(interrupt)
21749 @@ -823,7 +1062,7 @@ ENTRY(coprocessor_error)
21750 pushl_cfi $do_coprocessor_error
21751 jmp error_code
21752 CFI_ENDPROC
21753 -END(coprocessor_error)
21754 +ENDPROC(coprocessor_error)
21755
21756 ENTRY(simd_coprocessor_error)
21757 RING0_INT_FRAME
21758 @@ -836,7 +1075,7 @@ ENTRY(simd_coprocessor_error)
21759 .section .altinstructions,"a"
21760 altinstruction_entry 661b, 663f, X86_FEATURE_XMM, 662b-661b, 664f-663f
21761 .previous
21762 -.section .altinstr_replacement,"ax"
21763 +.section .altinstr_replacement,"a"
21764 663: pushl $do_simd_coprocessor_error
21765 664:
21766 .previous
21767 @@ -845,7 +1084,7 @@ ENTRY(simd_coprocessor_error)
21768 #endif
21769 jmp error_code
21770 CFI_ENDPROC
21771 -END(simd_coprocessor_error)
21772 +ENDPROC(simd_coprocessor_error)
21773
21774 ENTRY(device_not_available)
21775 RING0_INT_FRAME
21776 @@ -854,18 +1093,18 @@ ENTRY(device_not_available)
21777 pushl_cfi $do_device_not_available
21778 jmp error_code
21779 CFI_ENDPROC
21780 -END(device_not_available)
21781 +ENDPROC(device_not_available)
21782
21783 #ifdef CONFIG_PARAVIRT
21784 ENTRY(native_iret)
21785 iret
21786 _ASM_EXTABLE(native_iret, iret_exc)
21787 -END(native_iret)
21788 +ENDPROC(native_iret)
21789
21790 ENTRY(native_irq_enable_sysexit)
21791 sti
21792 sysexit
21793 -END(native_irq_enable_sysexit)
21794 +ENDPROC(native_irq_enable_sysexit)
21795 #endif
21796
21797 ENTRY(overflow)
21798 @@ -875,7 +1114,7 @@ ENTRY(overflow)
21799 pushl_cfi $do_overflow
21800 jmp error_code
21801 CFI_ENDPROC
21802 -END(overflow)
21803 +ENDPROC(overflow)
21804
21805 ENTRY(bounds)
21806 RING0_INT_FRAME
21807 @@ -884,7 +1123,7 @@ ENTRY(bounds)
21808 pushl_cfi $do_bounds
21809 jmp error_code
21810 CFI_ENDPROC
21811 -END(bounds)
21812 +ENDPROC(bounds)
21813
21814 ENTRY(invalid_op)
21815 RING0_INT_FRAME
21816 @@ -893,7 +1132,7 @@ ENTRY(invalid_op)
21817 pushl_cfi $do_invalid_op
21818 jmp error_code
21819 CFI_ENDPROC
21820 -END(invalid_op)
21821 +ENDPROC(invalid_op)
21822
21823 ENTRY(coprocessor_segment_overrun)
21824 RING0_INT_FRAME
21825 @@ -902,7 +1141,7 @@ ENTRY(coprocessor_segment_overrun)
21826 pushl_cfi $do_coprocessor_segment_overrun
21827 jmp error_code
21828 CFI_ENDPROC
21829 -END(coprocessor_segment_overrun)
21830 +ENDPROC(coprocessor_segment_overrun)
21831
21832 ENTRY(invalid_TSS)
21833 RING0_EC_FRAME
21834 @@ -910,7 +1149,7 @@ ENTRY(invalid_TSS)
21835 pushl_cfi $do_invalid_TSS
21836 jmp error_code
21837 CFI_ENDPROC
21838 -END(invalid_TSS)
21839 +ENDPROC(invalid_TSS)
21840
21841 ENTRY(segment_not_present)
21842 RING0_EC_FRAME
21843 @@ -918,7 +1157,7 @@ ENTRY(segment_not_present)
21844 pushl_cfi $do_segment_not_present
21845 jmp error_code
21846 CFI_ENDPROC
21847 -END(segment_not_present)
21848 +ENDPROC(segment_not_present)
21849
21850 ENTRY(stack_segment)
21851 RING0_EC_FRAME
21852 @@ -926,7 +1165,7 @@ ENTRY(stack_segment)
21853 pushl_cfi $do_stack_segment
21854 jmp error_code
21855 CFI_ENDPROC
21856 -END(stack_segment)
21857 +ENDPROC(stack_segment)
21858
21859 ENTRY(alignment_check)
21860 RING0_EC_FRAME
21861 @@ -934,7 +1173,7 @@ ENTRY(alignment_check)
21862 pushl_cfi $do_alignment_check
21863 jmp error_code
21864 CFI_ENDPROC
21865 -END(alignment_check)
21866 +ENDPROC(alignment_check)
21867
21868 ENTRY(divide_error)
21869 RING0_INT_FRAME
21870 @@ -943,7 +1182,7 @@ ENTRY(divide_error)
21871 pushl_cfi $do_divide_error
21872 jmp error_code
21873 CFI_ENDPROC
21874 -END(divide_error)
21875 +ENDPROC(divide_error)
21876
21877 #ifdef CONFIG_X86_MCE
21878 ENTRY(machine_check)
21879 @@ -953,7 +1192,7 @@ ENTRY(machine_check)
21880 pushl_cfi machine_check_vector
21881 jmp error_code
21882 CFI_ENDPROC
21883 -END(machine_check)
21884 +ENDPROC(machine_check)
21885 #endif
21886
21887 ENTRY(spurious_interrupt_bug)
21888 @@ -963,7 +1202,7 @@ ENTRY(spurious_interrupt_bug)
21889 pushl_cfi $do_spurious_interrupt_bug
21890 jmp error_code
21891 CFI_ENDPROC
21892 -END(spurious_interrupt_bug)
21893 +ENDPROC(spurious_interrupt_bug)
21894 /*
21895 * End of kprobes section
21896 */
21897 @@ -1073,7 +1312,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
21898
21899 ENTRY(mcount)
21900 ret
21901 -END(mcount)
21902 +ENDPROC(mcount)
21903
21904 ENTRY(ftrace_caller)
21905 cmpl $0, function_trace_stop
21906 @@ -1106,7 +1345,7 @@ ftrace_graph_call:
21907 .globl ftrace_stub
21908 ftrace_stub:
21909 ret
21910 -END(ftrace_caller)
21911 +ENDPROC(ftrace_caller)
21912
21913 ENTRY(ftrace_regs_caller)
21914 pushf /* push flags before compare (in cs location) */
21915 @@ -1210,7 +1449,7 @@ trace:
21916 popl %ecx
21917 popl %eax
21918 jmp ftrace_stub
21919 -END(mcount)
21920 +ENDPROC(mcount)
21921 #endif /* CONFIG_DYNAMIC_FTRACE */
21922 #endif /* CONFIG_FUNCTION_TRACER */
21923
21924 @@ -1228,7 +1467,7 @@ ENTRY(ftrace_graph_caller)
21925 popl %ecx
21926 popl %eax
21927 ret
21928 -END(ftrace_graph_caller)
21929 +ENDPROC(ftrace_graph_caller)
21930
21931 .globl return_to_handler
21932 return_to_handler:
21933 @@ -1284,15 +1523,18 @@ error_code:
21934 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
21935 REG_TO_PTGS %ecx
21936 SET_KERNEL_GS %ecx
21937 - movl $(__USER_DS), %ecx
21938 + movl $(__KERNEL_DS), %ecx
21939 movl %ecx, %ds
21940 movl %ecx, %es
21941 +
21942 + pax_enter_kernel
21943 +
21944 TRACE_IRQS_OFF
21945 movl %esp,%eax # pt_regs pointer
21946 call *%edi
21947 jmp ret_from_exception
21948 CFI_ENDPROC
21949 -END(page_fault)
21950 +ENDPROC(page_fault)
21951
21952 /*
21953 * Debug traps and NMI can happen at the one SYSENTER instruction
21954 @@ -1335,7 +1577,7 @@ debug_stack_correct:
21955 call do_debug
21956 jmp ret_from_exception
21957 CFI_ENDPROC
21958 -END(debug)
21959 +ENDPROC(debug)
21960
21961 /*
21962 * NMI is doubly nasty. It can happen _while_ we're handling
21963 @@ -1373,6 +1615,9 @@ nmi_stack_correct:
21964 xorl %edx,%edx # zero error code
21965 movl %esp,%eax # pt_regs pointer
21966 call do_nmi
21967 +
21968 + pax_exit_kernel
21969 +
21970 jmp restore_all_notrace
21971 CFI_ENDPROC
21972
21973 @@ -1409,12 +1654,15 @@ nmi_espfix_stack:
21974 FIXUP_ESPFIX_STACK # %eax == %esp
21975 xorl %edx,%edx # zero error code
21976 call do_nmi
21977 +
21978 + pax_exit_kernel
21979 +
21980 RESTORE_REGS
21981 lss 12+4(%esp), %esp # back to espfix stack
21982 CFI_ADJUST_CFA_OFFSET -24
21983 jmp irq_return
21984 CFI_ENDPROC
21985 -END(nmi)
21986 +ENDPROC(nmi)
21987
21988 ENTRY(int3)
21989 RING0_INT_FRAME
21990 @@ -1427,14 +1675,14 @@ ENTRY(int3)
21991 call do_int3
21992 jmp ret_from_exception
21993 CFI_ENDPROC
21994 -END(int3)
21995 +ENDPROC(int3)
21996
21997 ENTRY(general_protection)
21998 RING0_EC_FRAME
21999 pushl_cfi $do_general_protection
22000 jmp error_code
22001 CFI_ENDPROC
22002 -END(general_protection)
22003 +ENDPROC(general_protection)
22004
22005 #ifdef CONFIG_KVM_GUEST
22006 ENTRY(async_page_fault)
22007 @@ -1443,7 +1691,7 @@ ENTRY(async_page_fault)
22008 pushl_cfi $do_async_page_fault
22009 jmp error_code
22010 CFI_ENDPROC
22011 -END(async_page_fault)
22012 +ENDPROC(async_page_fault)
22013 #endif
22014
22015 /*
22016 diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
22017 index b077f4c..8e0df9f 100644
22018 --- a/arch/x86/kernel/entry_64.S
22019 +++ b/arch/x86/kernel/entry_64.S
22020 @@ -59,6 +59,8 @@
22021 #include <asm/context_tracking.h>
22022 #include <asm/smap.h>
22023 #include <linux/err.h>
22024 +#include <asm/pgtable.h>
22025 +#include <asm/alternative-asm.h>
22026
22027 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
22028 #include <linux/elf-em.h>
22029 @@ -80,8 +82,9 @@
22030 #ifdef CONFIG_DYNAMIC_FTRACE
22031
22032 ENTRY(function_hook)
22033 + pax_force_retaddr
22034 retq
22035 -END(function_hook)
22036 +ENDPROC(function_hook)
22037
22038 /* skip is set if stack has been adjusted */
22039 .macro ftrace_caller_setup skip=0
22040 @@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
22041 #endif
22042
22043 GLOBAL(ftrace_stub)
22044 + pax_force_retaddr
22045 retq
22046 -END(ftrace_caller)
22047 +ENDPROC(ftrace_caller)
22048
22049 ENTRY(ftrace_regs_caller)
22050 /* Save the current flags before compare (in SS location)*/
22051 @@ -191,7 +195,7 @@ ftrace_restore_flags:
22052 popfq
22053 jmp ftrace_stub
22054
22055 -END(ftrace_regs_caller)
22056 +ENDPROC(ftrace_regs_caller)
22057
22058
22059 #else /* ! CONFIG_DYNAMIC_FTRACE */
22060 @@ -212,6 +216,7 @@ ENTRY(function_hook)
22061 #endif
22062
22063 GLOBAL(ftrace_stub)
22064 + pax_force_retaddr
22065 retq
22066
22067 trace:
22068 @@ -225,12 +230,13 @@ trace:
22069 #endif
22070 subq $MCOUNT_INSN_SIZE, %rdi
22071
22072 + pax_force_fptr ftrace_trace_function
22073 call *ftrace_trace_function
22074
22075 MCOUNT_RESTORE_FRAME
22076
22077 jmp ftrace_stub
22078 -END(function_hook)
22079 +ENDPROC(function_hook)
22080 #endif /* CONFIG_DYNAMIC_FTRACE */
22081 #endif /* CONFIG_FUNCTION_TRACER */
22082
22083 @@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
22084
22085 MCOUNT_RESTORE_FRAME
22086
22087 + pax_force_retaddr
22088 retq
22089 -END(ftrace_graph_caller)
22090 +ENDPROC(ftrace_graph_caller)
22091
22092 GLOBAL(return_to_handler)
22093 subq $24, %rsp
22094 @@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
22095 movq 8(%rsp), %rdx
22096 movq (%rsp), %rax
22097 addq $24, %rsp
22098 + pax_force_fptr %rdi
22099 jmp *%rdi
22100 +ENDPROC(return_to_handler)
22101 #endif
22102
22103
22104 @@ -284,6 +293,430 @@ ENTRY(native_usergs_sysret64)
22105 ENDPROC(native_usergs_sysret64)
22106 #endif /* CONFIG_PARAVIRT */
22107
22108 + .macro ljmpq sel, off
22109 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
22110 + .byte 0x48; ljmp *1234f(%rip)
22111 + .pushsection .rodata
22112 + .align 16
22113 + 1234: .quad \off; .word \sel
22114 + .popsection
22115 +#else
22116 + pushq $\sel
22117 + pushq $\off
22118 + lretq
22119 +#endif
22120 + .endm
22121 +
22122 + .macro pax_enter_kernel
22123 + pax_set_fptr_mask
22124 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22125 + call pax_enter_kernel
22126 +#endif
22127 + .endm
22128 +
22129 + .macro pax_exit_kernel
22130 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22131 + call pax_exit_kernel
22132 +#endif
22133 +
22134 + .endm
22135 +
22136 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22137 +ENTRY(pax_enter_kernel)
22138 + pushq %rdi
22139 +
22140 +#ifdef CONFIG_PARAVIRT
22141 + PV_SAVE_REGS(CLBR_RDI)
22142 +#endif
22143 +
22144 +#ifdef CONFIG_PAX_KERNEXEC
22145 + GET_CR0_INTO_RDI
22146 + bts $16,%rdi
22147 + jnc 3f
22148 + mov %cs,%edi
22149 + cmp $__KERNEL_CS,%edi
22150 + jnz 2f
22151 +1:
22152 +#endif
22153 +
22154 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22155 + 661: jmp 111f
22156 + .pushsection .altinstr_replacement, "a"
22157 + 662: ASM_NOP2
22158 + .popsection
22159 + .pushsection .altinstructions, "a"
22160 + altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22161 + .popsection
22162 + GET_CR3_INTO_RDI
22163 + cmp $0,%dil
22164 + jnz 112f
22165 + mov $__KERNEL_DS,%edi
22166 + mov %edi,%ss
22167 + jmp 111f
22168 +112: cmp $1,%dil
22169 + jz 113f
22170 + ud2
22171 +113: sub $4097,%rdi
22172 + bts $63,%rdi
22173 + SET_RDI_INTO_CR3
22174 + mov $__UDEREF_KERNEL_DS,%edi
22175 + mov %edi,%ss
22176 +111:
22177 +#endif
22178 +
22179 +#ifdef CONFIG_PARAVIRT
22180 + PV_RESTORE_REGS(CLBR_RDI)
22181 +#endif
22182 +
22183 + popq %rdi
22184 + pax_force_retaddr
22185 + retq
22186 +
22187 +#ifdef CONFIG_PAX_KERNEXEC
22188 +2: ljmpq __KERNEL_CS,1b
22189 +3: ljmpq __KERNEXEC_KERNEL_CS,4f
22190 +4: SET_RDI_INTO_CR0
22191 + jmp 1b
22192 +#endif
22193 +ENDPROC(pax_enter_kernel)
22194 +
22195 +ENTRY(pax_exit_kernel)
22196 + pushq %rdi
22197 +
22198 +#ifdef CONFIG_PARAVIRT
22199 + PV_SAVE_REGS(CLBR_RDI)
22200 +#endif
22201 +
22202 +#ifdef CONFIG_PAX_KERNEXEC
22203 + mov %cs,%rdi
22204 + cmp $__KERNEXEC_KERNEL_CS,%edi
22205 + jz 2f
22206 + GET_CR0_INTO_RDI
22207 + bts $16,%rdi
22208 + jnc 4f
22209 +1:
22210 +#endif
22211 +
22212 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22213 + 661: jmp 111f
22214 + .pushsection .altinstr_replacement, "a"
22215 + 662: ASM_NOP2
22216 + .popsection
22217 + .pushsection .altinstructions, "a"
22218 + altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22219 + .popsection
22220 + mov %ss,%edi
22221 + cmp $__UDEREF_KERNEL_DS,%edi
22222 + jnz 111f
22223 + GET_CR3_INTO_RDI
22224 + cmp $0,%dil
22225 + jz 112f
22226 + ud2
22227 +112: add $4097,%rdi
22228 + bts $63,%rdi
22229 + SET_RDI_INTO_CR3
22230 + mov $__KERNEL_DS,%edi
22231 + mov %edi,%ss
22232 +111:
22233 +#endif
22234 +
22235 +#ifdef CONFIG_PARAVIRT
22236 + PV_RESTORE_REGS(CLBR_RDI);
22237 +#endif
22238 +
22239 + popq %rdi
22240 + pax_force_retaddr
22241 + retq
22242 +
22243 +#ifdef CONFIG_PAX_KERNEXEC
22244 +2: GET_CR0_INTO_RDI
22245 + btr $16,%rdi
22246 + jnc 4f
22247 + ljmpq __KERNEL_CS,3f
22248 +3: SET_RDI_INTO_CR0
22249 + jmp 1b
22250 +4: ud2
22251 + jmp 4b
22252 +#endif
22253 +ENDPROC(pax_exit_kernel)
22254 +#endif
22255 +
22256 + .macro pax_enter_kernel_user
22257 + pax_set_fptr_mask
22258 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22259 + call pax_enter_kernel_user
22260 +#endif
22261 + .endm
22262 +
22263 + .macro pax_exit_kernel_user
22264 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22265 + call pax_exit_kernel_user
22266 +#endif
22267 +#ifdef CONFIG_PAX_RANDKSTACK
22268 + pushq %rax
22269 + pushq %r11
22270 + call pax_randomize_kstack
22271 + popq %r11
22272 + popq %rax
22273 +#endif
22274 + .endm
22275 +
22276 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22277 +ENTRY(pax_enter_kernel_user)
22278 + pushq %rdi
22279 + pushq %rbx
22280 +
22281 +#ifdef CONFIG_PARAVIRT
22282 + PV_SAVE_REGS(CLBR_RDI)
22283 +#endif
22284 +
22285 + 661: jmp 111f
22286 + .pushsection .altinstr_replacement, "a"
22287 + 662: ASM_NOP2
22288 + .popsection
22289 + .pushsection .altinstructions, "a"
22290 + altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22291 + .popsection
22292 + GET_CR3_INTO_RDI
22293 + cmp $1,%dil
22294 + jnz 4f
22295 + sub $4097,%rdi
22296 + bts $63,%rdi
22297 + SET_RDI_INTO_CR3
22298 + jmp 3f
22299 +111:
22300 +
22301 + GET_CR3_INTO_RDI
22302 + mov %rdi,%rbx
22303 + add $__START_KERNEL_map,%rbx
22304 + sub phys_base(%rip),%rbx
22305 +
22306 +#ifdef CONFIG_PARAVIRT
22307 + cmpl $0, pv_info+PARAVIRT_enabled
22308 + jz 1f
22309 + pushq %rdi
22310 + i = 0
22311 + .rept USER_PGD_PTRS
22312 + mov i*8(%rbx),%rsi
22313 + mov $0,%sil
22314 + lea i*8(%rbx),%rdi
22315 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
22316 + i = i + 1
22317 + .endr
22318 + popq %rdi
22319 + jmp 2f
22320 +1:
22321 +#endif
22322 +
22323 + i = 0
22324 + .rept USER_PGD_PTRS
22325 + movb $0,i*8(%rbx)
22326 + i = i + 1
22327 + .endr
22328 +
22329 +2: SET_RDI_INTO_CR3
22330 +
22331 +#ifdef CONFIG_PAX_KERNEXEC
22332 + GET_CR0_INTO_RDI
22333 + bts $16,%rdi
22334 + SET_RDI_INTO_CR0
22335 +#endif
22336 +
22337 +3:
22338 +
22339 +#ifdef CONFIG_PARAVIRT
22340 + PV_RESTORE_REGS(CLBR_RDI)
22341 +#endif
22342 +
22343 + popq %rbx
22344 + popq %rdi
22345 + pax_force_retaddr
22346 + retq
22347 +4: ud2
22348 +ENDPROC(pax_enter_kernel_user)
22349 +
22350 +ENTRY(pax_exit_kernel_user)
22351 + pushq %rdi
22352 + pushq %rbx
22353 +
22354 +#ifdef CONFIG_PARAVIRT
22355 + PV_SAVE_REGS(CLBR_RDI)
22356 +#endif
22357 +
22358 + GET_CR3_INTO_RDI
22359 + 661: jmp 1f
22360 + .pushsection .altinstr_replacement, "a"
22361 + 662: ASM_NOP2
22362 + .popsection
22363 + .pushsection .altinstructions, "a"
22364 + altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22365 + .popsection
22366 + cmp $0,%dil
22367 + jnz 3f
22368 + add $4097,%rdi
22369 + bts $63,%rdi
22370 + SET_RDI_INTO_CR3
22371 + jmp 2f
22372 +1:
22373 +
22374 + mov %rdi,%rbx
22375 +
22376 +#ifdef CONFIG_PAX_KERNEXEC
22377 + GET_CR0_INTO_RDI
22378 + btr $16,%rdi
22379 + jnc 3f
22380 + SET_RDI_INTO_CR0
22381 +#endif
22382 +
22383 + add $__START_KERNEL_map,%rbx
22384 + sub phys_base(%rip),%rbx
22385 +
22386 +#ifdef CONFIG_PARAVIRT
22387 + cmpl $0, pv_info+PARAVIRT_enabled
22388 + jz 1f
22389 + i = 0
22390 + .rept USER_PGD_PTRS
22391 + mov i*8(%rbx),%rsi
22392 + mov $0x67,%sil
22393 + lea i*8(%rbx),%rdi
22394 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
22395 + i = i + 1
22396 + .endr
22397 + jmp 2f
22398 +1:
22399 +#endif
22400 +
22401 + i = 0
22402 + .rept USER_PGD_PTRS
22403 + movb $0x67,i*8(%rbx)
22404 + i = i + 1
22405 + .endr
22406 +2:
22407 +
22408 +#ifdef CONFIG_PARAVIRT
22409 + PV_RESTORE_REGS(CLBR_RDI)
22410 +#endif
22411 +
22412 + popq %rbx
22413 + popq %rdi
22414 + pax_force_retaddr
22415 + retq
22416 +3: ud2
22417 +ENDPROC(pax_exit_kernel_user)
22418 +#endif
22419 +
22420 + .macro pax_enter_kernel_nmi
22421 + pax_set_fptr_mask
22422 +
22423 +#ifdef CONFIG_PAX_KERNEXEC
22424 + GET_CR0_INTO_RDI
22425 + bts $16,%rdi
22426 + jc 110f
22427 + SET_RDI_INTO_CR0
22428 + or $2,%ebx
22429 +110:
22430 +#endif
22431 +
22432 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22433 + 661: jmp 111f
22434 + .pushsection .altinstr_replacement, "a"
22435 + 662: ASM_NOP2
22436 + .popsection
22437 + .pushsection .altinstructions, "a"
22438 + altinstruction_entry 661b, 662b, X86_FEATURE_PCID, 2, 2
22439 + .popsection
22440 + GET_CR3_INTO_RDI
22441 + cmp $0,%dil
22442 + jz 111f
22443 + sub $4097,%rdi
22444 + or $4,%ebx
22445 + bts $63,%rdi
22446 + SET_RDI_INTO_CR3
22447 + mov $__UDEREF_KERNEL_DS,%edi
22448 + mov %edi,%ss
22449 +111:
22450 +#endif
22451 + .endm
22452 +
22453 + .macro pax_exit_kernel_nmi
22454 +#ifdef CONFIG_PAX_KERNEXEC
22455 + btr $1,%ebx
22456 + jnc 110f
22457 + GET_CR0_INTO_RDI
22458 + btr $16,%rdi
22459 + SET_RDI_INTO_CR0
22460 +110:
22461 +#endif
22462 +
22463 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22464 + btr $2,%ebx
22465 + jnc 111f
22466 + GET_CR3_INTO_RDI
22467 + add $4097,%rdi
22468 + bts $63,%rdi
22469 + SET_RDI_INTO_CR3
22470 + mov $__KERNEL_DS,%edi
22471 + mov %edi,%ss
22472 +111:
22473 +#endif
22474 + .endm
22475 +
22476 + .macro pax_erase_kstack
22477 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22478 + call pax_erase_kstack
22479 +#endif
22480 + .endm
22481 +
22482 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
22483 +ENTRY(pax_erase_kstack)
22484 + pushq %rdi
22485 + pushq %rcx
22486 + pushq %rax
22487 + pushq %r11
22488 +
22489 + GET_THREAD_INFO(%r11)
22490 + mov TI_lowest_stack(%r11), %rdi
22491 + mov $-0xBEEF, %rax
22492 + std
22493 +
22494 +1: mov %edi, %ecx
22495 + and $THREAD_SIZE_asm - 1, %ecx
22496 + shr $3, %ecx
22497 + repne scasq
22498 + jecxz 2f
22499 +
22500 + cmp $2*8, %ecx
22501 + jc 2f
22502 +
22503 + mov $2*8, %ecx
22504 + repe scasq
22505 + jecxz 2f
22506 + jne 1b
22507 +
22508 +2: cld
22509 + mov %esp, %ecx
22510 + sub %edi, %ecx
22511 +
22512 + cmp $THREAD_SIZE_asm, %rcx
22513 + jb 3f
22514 + ud2
22515 +3:
22516 +
22517 + shr $3, %ecx
22518 + rep stosq
22519 +
22520 + mov TI_task_thread_sp0(%r11), %rdi
22521 + sub $256, %rdi
22522 + mov %rdi, TI_lowest_stack(%r11)
22523 +
22524 + popq %r11
22525 + popq %rax
22526 + popq %rcx
22527 + popq %rdi
22528 + pax_force_retaddr
22529 + ret
22530 +ENDPROC(pax_erase_kstack)
22531 +#endif
22532
22533 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
22534 #ifdef CONFIG_TRACE_IRQFLAGS
22535 @@ -320,7 +753,7 @@ ENDPROC(native_usergs_sysret64)
22536 .endm
22537
22538 .macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
22539 - bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
22540 + bt $X86_EFLAGS_IF_BIT,EFLAGS-\offset(%rsp) /* interrupts off? */
22541 jnc 1f
22542 TRACE_IRQS_ON_DEBUG
22543 1:
22544 @@ -358,27 +791,6 @@ ENDPROC(native_usergs_sysret64)
22545 movq \tmp,R11+\offset(%rsp)
22546 .endm
22547
22548 - .macro FAKE_STACK_FRAME child_rip
22549 - /* push in order ss, rsp, eflags, cs, rip */
22550 - xorl %eax, %eax
22551 - pushq_cfi $__KERNEL_DS /* ss */
22552 - /*CFI_REL_OFFSET ss,0*/
22553 - pushq_cfi %rax /* rsp */
22554 - CFI_REL_OFFSET rsp,0
22555 - pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED) /* eflags - interrupts on */
22556 - /*CFI_REL_OFFSET rflags,0*/
22557 - pushq_cfi $__KERNEL_CS /* cs */
22558 - /*CFI_REL_OFFSET cs,0*/
22559 - pushq_cfi \child_rip /* rip */
22560 - CFI_REL_OFFSET rip,0
22561 - pushq_cfi %rax /* orig rax */
22562 - .endm
22563 -
22564 - .macro UNFAKE_STACK_FRAME
22565 - addq $8*6, %rsp
22566 - CFI_ADJUST_CFA_OFFSET -(6*8)
22567 - .endm
22568 -
22569 /*
22570 * initial frame state for interrupts (and exceptions without error code)
22571 */
22572 @@ -445,25 +857,26 @@ ENDPROC(native_usergs_sysret64)
22573 /* save partial stack frame */
22574 .macro SAVE_ARGS_IRQ
22575 cld
22576 - /* start from rbp in pt_regs and jump over */
22577 - movq_cfi rdi, (RDI-RBP)
22578 - movq_cfi rsi, (RSI-RBP)
22579 - movq_cfi rdx, (RDX-RBP)
22580 - movq_cfi rcx, (RCX-RBP)
22581 - movq_cfi rax, (RAX-RBP)
22582 - movq_cfi r8, (R8-RBP)
22583 - movq_cfi r9, (R9-RBP)
22584 - movq_cfi r10, (R10-RBP)
22585 - movq_cfi r11, (R11-RBP)
22586 + /* start from r15 in pt_regs and jump over */
22587 + movq_cfi rdi, RDI
22588 + movq_cfi rsi, RSI
22589 + movq_cfi rdx, RDX
22590 + movq_cfi rcx, RCX
22591 + movq_cfi rax, RAX
22592 + movq_cfi r8, R8
22593 + movq_cfi r9, R9
22594 + movq_cfi r10, R10
22595 + movq_cfi r11, R11
22596 + movq_cfi r12, R12
22597
22598 /* Save rbp so that we can unwind from get_irq_regs() */
22599 - movq_cfi rbp, 0
22600 + movq_cfi rbp, RBP
22601
22602 /* Save previous stack value */
22603 movq %rsp, %rsi
22604
22605 - leaq -RBP(%rsp),%rdi /* arg1 for handler */
22606 - testl $3, CS-RBP(%rsi)
22607 + movq %rsp,%rdi /* arg1 for handler */
22608 + testb $3, CS(%rsi)
22609 je 1f
22610 SWAPGS
22611 /*
22612 @@ -514,9 +927,10 @@ ENTRY(save_paranoid)
22613 js 1f /* negative -> in kernel */
22614 SWAPGS
22615 xorl %ebx,%ebx
22616 -1: ret
22617 +1: pax_force_retaddr_bts
22618 + ret
22619 CFI_ENDPROC
22620 -END(save_paranoid)
22621 +ENDPROC(save_paranoid)
22622 .popsection
22623
22624 /*
22625 @@ -538,7 +952,7 @@ ENTRY(ret_from_fork)
22626
22627 RESTORE_REST
22628
22629 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
22630 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
22631 jz 1f
22632
22633 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
22634 @@ -548,15 +962,13 @@ ENTRY(ret_from_fork)
22635 jmp ret_from_sys_call # go to the SYSRET fastpath
22636
22637 1:
22638 - subq $REST_SKIP, %rsp # leave space for volatiles
22639 - CFI_ADJUST_CFA_OFFSET REST_SKIP
22640 movq %rbp, %rdi
22641 call *%rbx
22642 movl $0, RAX(%rsp)
22643 RESTORE_REST
22644 jmp int_ret_from_sys_call
22645 CFI_ENDPROC
22646 -END(ret_from_fork)
22647 +ENDPROC(ret_from_fork)
22648
22649 /*
22650 * System call entry. Up to 6 arguments in registers are supported.
22651 @@ -593,7 +1005,7 @@ END(ret_from_fork)
22652 ENTRY(system_call)
22653 CFI_STARTPROC simple
22654 CFI_SIGNAL_FRAME
22655 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
22656 + CFI_DEF_CFA rsp,0
22657 CFI_REGISTER rip,rcx
22658 /*CFI_REGISTER rflags,r11*/
22659 SWAPGS_UNSAFE_STACK
22660 @@ -606,16 +1018,23 @@ GLOBAL(system_call_after_swapgs)
22661
22662 movq %rsp,PER_CPU_VAR(old_rsp)
22663 movq PER_CPU_VAR(kernel_stack),%rsp
22664 + SAVE_ARGS 8*6,0
22665 + pax_enter_kernel_user
22666 +
22667 +#ifdef CONFIG_PAX_RANDKSTACK
22668 + pax_erase_kstack
22669 +#endif
22670 +
22671 /*
22672 * No need to follow this irqs off/on section - it's straight
22673 * and short:
22674 */
22675 ENABLE_INTERRUPTS(CLBR_NONE)
22676 - SAVE_ARGS 8,0
22677 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
22678 movq %rcx,RIP-ARGOFFSET(%rsp)
22679 CFI_REL_OFFSET rip,RIP-ARGOFFSET
22680 - testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
22681 + GET_THREAD_INFO(%rcx)
22682 + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
22683 jnz tracesys
22684 system_call_fastpath:
22685 #if __SYSCALL_MASK == ~0
22686 @@ -639,10 +1058,13 @@ sysret_check:
22687 LOCKDEP_SYS_EXIT
22688 DISABLE_INTERRUPTS(CLBR_NONE)
22689 TRACE_IRQS_OFF
22690 - movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
22691 + GET_THREAD_INFO(%rcx)
22692 + movl TI_flags(%rcx),%edx
22693 andl %edi,%edx
22694 jnz sysret_careful
22695 CFI_REMEMBER_STATE
22696 + pax_exit_kernel_user
22697 + pax_erase_kstack
22698 /*
22699 * sysretq will re-enable interrupts:
22700 */
22701 @@ -701,6 +1123,9 @@ auditsys:
22702 movq %rax,%rsi /* 2nd arg: syscall number */
22703 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
22704 call __audit_syscall_entry
22705 +
22706 + pax_erase_kstack
22707 +
22708 LOAD_ARGS 0 /* reload call-clobbered registers */
22709 jmp system_call_fastpath
22710
22711 @@ -722,7 +1147,7 @@ sysret_audit:
22712 /* Do syscall tracing */
22713 tracesys:
22714 #ifdef CONFIG_AUDITSYSCALL
22715 - testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
22716 + testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
22717 jz auditsys
22718 #endif
22719 SAVE_REST
22720 @@ -730,12 +1155,15 @@ tracesys:
22721 FIXUP_TOP_OF_STACK %rdi
22722 movq %rsp,%rdi
22723 call syscall_trace_enter
22724 +
22725 + pax_erase_kstack
22726 +
22727 /*
22728 * Reload arg registers from stack in case ptrace changed them.
22729 * We don't reload %rax because syscall_trace_enter() returned
22730 * the value it wants us to use in the table lookup.
22731 */
22732 - LOAD_ARGS ARGOFFSET, 1
22733 + LOAD_ARGS 1
22734 RESTORE_REST
22735 #if __SYSCALL_MASK == ~0
22736 cmpq $__NR_syscall_max,%rax
22737 @@ -765,7 +1193,9 @@ GLOBAL(int_with_check)
22738 andl %edi,%edx
22739 jnz int_careful
22740 andl $~TS_COMPAT,TI_status(%rcx)
22741 - jmp retint_swapgs
22742 + pax_exit_kernel_user
22743 + pax_erase_kstack
22744 + jmp retint_swapgs_pax
22745
22746 /* Either reschedule or signal or syscall exit tracking needed. */
22747 /* First do a reschedule test. */
22748 @@ -811,7 +1241,7 @@ int_restore_rest:
22749 TRACE_IRQS_OFF
22750 jmp int_with_check
22751 CFI_ENDPROC
22752 -END(system_call)
22753 +ENDPROC(system_call)
22754
22755 .macro FORK_LIKE func
22756 ENTRY(stub_\func)
22757 @@ -824,9 +1254,10 @@ ENTRY(stub_\func)
22758 DEFAULT_FRAME 0 8 /* offset 8: return address */
22759 call sys_\func
22760 RESTORE_TOP_OF_STACK %r11, 8
22761 - ret $REST_SKIP /* pop extended registers */
22762 + pax_force_retaddr
22763 + ret
22764 CFI_ENDPROC
22765 -END(stub_\func)
22766 +ENDPROC(stub_\func)
22767 .endm
22768
22769 .macro FIXED_FRAME label,func
22770 @@ -836,9 +1267,10 @@ ENTRY(\label)
22771 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
22772 call \func
22773 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
22774 + pax_force_retaddr
22775 ret
22776 CFI_ENDPROC
22777 -END(\label)
22778 +ENDPROC(\label)
22779 .endm
22780
22781 FORK_LIKE clone
22782 @@ -846,19 +1278,6 @@ END(\label)
22783 FORK_LIKE vfork
22784 FIXED_FRAME stub_iopl, sys_iopl
22785
22786 -ENTRY(ptregscall_common)
22787 - DEFAULT_FRAME 1 8 /* offset 8: return address */
22788 - RESTORE_TOP_OF_STACK %r11, 8
22789 - movq_cfi_restore R15+8, r15
22790 - movq_cfi_restore R14+8, r14
22791 - movq_cfi_restore R13+8, r13
22792 - movq_cfi_restore R12+8, r12
22793 - movq_cfi_restore RBP+8, rbp
22794 - movq_cfi_restore RBX+8, rbx
22795 - ret $REST_SKIP /* pop extended registers */
22796 - CFI_ENDPROC
22797 -END(ptregscall_common)
22798 -
22799 ENTRY(stub_execve)
22800 CFI_STARTPROC
22801 addq $8, %rsp
22802 @@ -870,7 +1289,7 @@ ENTRY(stub_execve)
22803 RESTORE_REST
22804 jmp int_ret_from_sys_call
22805 CFI_ENDPROC
22806 -END(stub_execve)
22807 +ENDPROC(stub_execve)
22808
22809 /*
22810 * sigreturn is special because it needs to restore all registers on return.
22811 @@ -887,7 +1306,7 @@ ENTRY(stub_rt_sigreturn)
22812 RESTORE_REST
22813 jmp int_ret_from_sys_call
22814 CFI_ENDPROC
22815 -END(stub_rt_sigreturn)
22816 +ENDPROC(stub_rt_sigreturn)
22817
22818 #ifdef CONFIG_X86_X32_ABI
22819 ENTRY(stub_x32_rt_sigreturn)
22820 @@ -901,7 +1320,7 @@ ENTRY(stub_x32_rt_sigreturn)
22821 RESTORE_REST
22822 jmp int_ret_from_sys_call
22823 CFI_ENDPROC
22824 -END(stub_x32_rt_sigreturn)
22825 +ENDPROC(stub_x32_rt_sigreturn)
22826
22827 ENTRY(stub_x32_execve)
22828 CFI_STARTPROC
22829 @@ -915,7 +1334,7 @@ ENTRY(stub_x32_execve)
22830 RESTORE_REST
22831 jmp int_ret_from_sys_call
22832 CFI_ENDPROC
22833 -END(stub_x32_execve)
22834 +ENDPROC(stub_x32_execve)
22835
22836 #endif
22837
22838 @@ -952,7 +1371,7 @@ vector=vector+1
22839 2: jmp common_interrupt
22840 .endr
22841 CFI_ENDPROC
22842 -END(irq_entries_start)
22843 +ENDPROC(irq_entries_start)
22844
22845 .previous
22846 END(interrupt)
22847 @@ -969,9 +1388,19 @@ END(interrupt)
22848 /* 0(%rsp): ~(interrupt number) */
22849 .macro interrupt func
22850 /* reserve pt_regs for scratch regs and rbp */
22851 - subq $ORIG_RAX-RBP, %rsp
22852 - CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
22853 + subq $ORIG_RAX, %rsp
22854 + CFI_ADJUST_CFA_OFFSET ORIG_RAX
22855 SAVE_ARGS_IRQ
22856 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22857 + testb $3, CS(%rdi)
22858 + jnz 1f
22859 + pax_enter_kernel
22860 + jmp 2f
22861 +1: pax_enter_kernel_user
22862 +2:
22863 +#else
22864 + pax_enter_kernel
22865 +#endif
22866 call \func
22867 .endm
22868
22869 @@ -997,14 +1426,14 @@ ret_from_intr:
22870
22871 /* Restore saved previous stack */
22872 popq %rsi
22873 - CFI_DEF_CFA rsi,SS+8-RBP /* reg/off reset after def_cfa_expr */
22874 - leaq ARGOFFSET-RBP(%rsi), %rsp
22875 + CFI_DEF_CFA rsi,SS+8 /* reg/off reset after def_cfa_expr */
22876 + movq %rsi, %rsp
22877 CFI_DEF_CFA_REGISTER rsp
22878 - CFI_ADJUST_CFA_OFFSET RBP-ARGOFFSET
22879 + CFI_ADJUST_CFA_OFFSET -ARGOFFSET
22880
22881 exit_intr:
22882 GET_THREAD_INFO(%rcx)
22883 - testl $3,CS-ARGOFFSET(%rsp)
22884 + testb $3,CS-ARGOFFSET(%rsp)
22885 je retint_kernel
22886
22887 /* Interrupt came from user space */
22888 @@ -1026,12 +1455,16 @@ retint_swapgs: /* return to user-space */
22889 * The iretq could re-enable interrupts:
22890 */
22891 DISABLE_INTERRUPTS(CLBR_ANY)
22892 + pax_exit_kernel_user
22893 +retint_swapgs_pax:
22894 TRACE_IRQS_IRETQ
22895 SWAPGS
22896 jmp restore_args
22897
22898 retint_restore_args: /* return to kernel space */
22899 DISABLE_INTERRUPTS(CLBR_ANY)
22900 + pax_exit_kernel
22901 + pax_force_retaddr (RIP-ARGOFFSET)
22902 /*
22903 * The iretq could re-enable interrupts:
22904 */
22905 @@ -1114,7 +1547,7 @@ ENTRY(retint_kernel)
22906 #endif
22907
22908 CFI_ENDPROC
22909 -END(common_interrupt)
22910 +ENDPROC(common_interrupt)
22911 /*
22912 * End of kprobes section
22913 */
22914 @@ -1132,7 +1565,7 @@ ENTRY(\sym)
22915 interrupt \do_sym
22916 jmp ret_from_intr
22917 CFI_ENDPROC
22918 -END(\sym)
22919 +ENDPROC(\sym)
22920 .endm
22921
22922 #ifdef CONFIG_TRACING
22923 @@ -1215,12 +1648,22 @@ ENTRY(\sym)
22924 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22925 call error_entry
22926 DEFAULT_FRAME 0
22927 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22928 + testb $3, CS(%rsp)
22929 + jnz 1f
22930 + pax_enter_kernel
22931 + jmp 2f
22932 +1: pax_enter_kernel_user
22933 +2:
22934 +#else
22935 + pax_enter_kernel
22936 +#endif
22937 movq %rsp,%rdi /* pt_regs pointer */
22938 xorl %esi,%esi /* no error code */
22939 call \do_sym
22940 jmp error_exit /* %ebx: no swapgs flag */
22941 CFI_ENDPROC
22942 -END(\sym)
22943 +ENDPROC(\sym)
22944 .endm
22945
22946 .macro paranoidzeroentry sym do_sym
22947 @@ -1233,15 +1676,25 @@ ENTRY(\sym)
22948 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22949 call save_paranoid
22950 TRACE_IRQS_OFF
22951 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22952 + testb $3, CS(%rsp)
22953 + jnz 1f
22954 + pax_enter_kernel
22955 + jmp 2f
22956 +1: pax_enter_kernel_user
22957 +2:
22958 +#else
22959 + pax_enter_kernel
22960 +#endif
22961 movq %rsp,%rdi /* pt_regs pointer */
22962 xorl %esi,%esi /* no error code */
22963 call \do_sym
22964 jmp paranoid_exit /* %ebx: no swapgs flag */
22965 CFI_ENDPROC
22966 -END(\sym)
22967 +ENDPROC(\sym)
22968 .endm
22969
22970 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
22971 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r13)
22972 .macro paranoidzeroentry_ist sym do_sym ist
22973 ENTRY(\sym)
22974 INTR_FRAME
22975 @@ -1252,14 +1705,30 @@ ENTRY(\sym)
22976 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
22977 call save_paranoid
22978 TRACE_IRQS_OFF_DEBUG
22979 +#ifdef CONFIG_PAX_MEMORY_UDEREF
22980 + testb $3, CS(%rsp)
22981 + jnz 1f
22982 + pax_enter_kernel
22983 + jmp 2f
22984 +1: pax_enter_kernel_user
22985 +2:
22986 +#else
22987 + pax_enter_kernel
22988 +#endif
22989 movq %rsp,%rdi /* pt_regs pointer */
22990 xorl %esi,%esi /* no error code */
22991 +#ifdef CONFIG_SMP
22992 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r13d
22993 + lea init_tss(%r13), %r13
22994 +#else
22995 + lea init_tss(%rip), %r13
22996 +#endif
22997 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
22998 call \do_sym
22999 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
23000 jmp paranoid_exit /* %ebx: no swapgs flag */
23001 CFI_ENDPROC
23002 -END(\sym)
23003 +ENDPROC(\sym)
23004 .endm
23005
23006 .macro errorentry sym do_sym
23007 @@ -1271,13 +1740,23 @@ ENTRY(\sym)
23008 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
23009 call error_entry
23010 DEFAULT_FRAME 0
23011 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23012 + testb $3, CS(%rsp)
23013 + jnz 1f
23014 + pax_enter_kernel
23015 + jmp 2f
23016 +1: pax_enter_kernel_user
23017 +2:
23018 +#else
23019 + pax_enter_kernel
23020 +#endif
23021 movq %rsp,%rdi /* pt_regs pointer */
23022 movq ORIG_RAX(%rsp),%rsi /* get error code */
23023 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
23024 call \do_sym
23025 jmp error_exit /* %ebx: no swapgs flag */
23026 CFI_ENDPROC
23027 -END(\sym)
23028 +ENDPROC(\sym)
23029 .endm
23030
23031 /* error code is on the stack already */
23032 @@ -1291,13 +1770,23 @@ ENTRY(\sym)
23033 call save_paranoid
23034 DEFAULT_FRAME 0
23035 TRACE_IRQS_OFF
23036 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23037 + testb $3, CS(%rsp)
23038 + jnz 1f
23039 + pax_enter_kernel
23040 + jmp 2f
23041 +1: pax_enter_kernel_user
23042 +2:
23043 +#else
23044 + pax_enter_kernel
23045 +#endif
23046 movq %rsp,%rdi /* pt_regs pointer */
23047 movq ORIG_RAX(%rsp),%rsi /* get error code */
23048 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
23049 call \do_sym
23050 jmp paranoid_exit /* %ebx: no swapgs flag */
23051 CFI_ENDPROC
23052 -END(\sym)
23053 +ENDPROC(\sym)
23054 .endm
23055
23056 zeroentry divide_error do_divide_error
23057 @@ -1327,9 +1816,10 @@ gs_change:
23058 2: mfence /* workaround */
23059 SWAPGS
23060 popfq_cfi
23061 + pax_force_retaddr
23062 ret
23063 CFI_ENDPROC
23064 -END(native_load_gs_index)
23065 +ENDPROC(native_load_gs_index)
23066
23067 _ASM_EXTABLE(gs_change,bad_gs)
23068 .section .fixup,"ax"
23069 @@ -1357,9 +1847,10 @@ ENTRY(call_softirq)
23070 CFI_DEF_CFA_REGISTER rsp
23071 CFI_ADJUST_CFA_OFFSET -8
23072 decl PER_CPU_VAR(irq_count)
23073 + pax_force_retaddr
23074 ret
23075 CFI_ENDPROC
23076 -END(call_softirq)
23077 +ENDPROC(call_softirq)
23078
23079 #ifdef CONFIG_XEN
23080 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
23081 @@ -1397,7 +1888,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
23082 decl PER_CPU_VAR(irq_count)
23083 jmp error_exit
23084 CFI_ENDPROC
23085 -END(xen_do_hypervisor_callback)
23086 +ENDPROC(xen_do_hypervisor_callback)
23087
23088 /*
23089 * Hypervisor uses this for application faults while it executes.
23090 @@ -1456,7 +1947,7 @@ ENTRY(xen_failsafe_callback)
23091 SAVE_ALL
23092 jmp error_exit
23093 CFI_ENDPROC
23094 -END(xen_failsafe_callback)
23095 +ENDPROC(xen_failsafe_callback)
23096
23097 apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
23098 xen_hvm_callback_vector xen_evtchn_do_upcall
23099 @@ -1508,18 +1999,33 @@ ENTRY(paranoid_exit)
23100 DEFAULT_FRAME
23101 DISABLE_INTERRUPTS(CLBR_NONE)
23102 TRACE_IRQS_OFF_DEBUG
23103 - testl %ebx,%ebx /* swapgs needed? */
23104 + testl $1,%ebx /* swapgs needed? */
23105 jnz paranoid_restore
23106 - testl $3,CS(%rsp)
23107 + testb $3,CS(%rsp)
23108 jnz paranoid_userspace
23109 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23110 + pax_exit_kernel
23111 + TRACE_IRQS_IRETQ 0
23112 + SWAPGS_UNSAFE_STACK
23113 + RESTORE_ALL 8
23114 + pax_force_retaddr_bts
23115 + jmp irq_return
23116 +#endif
23117 paranoid_swapgs:
23118 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23119 + pax_exit_kernel_user
23120 +#else
23121 + pax_exit_kernel
23122 +#endif
23123 TRACE_IRQS_IRETQ 0
23124 SWAPGS_UNSAFE_STACK
23125 RESTORE_ALL 8
23126 jmp irq_return
23127 paranoid_restore:
23128 + pax_exit_kernel
23129 TRACE_IRQS_IRETQ_DEBUG 0
23130 RESTORE_ALL 8
23131 + pax_force_retaddr_bts
23132 jmp irq_return
23133 paranoid_userspace:
23134 GET_THREAD_INFO(%rcx)
23135 @@ -1548,7 +2054,7 @@ paranoid_schedule:
23136 TRACE_IRQS_OFF
23137 jmp paranoid_userspace
23138 CFI_ENDPROC
23139 -END(paranoid_exit)
23140 +ENDPROC(paranoid_exit)
23141
23142 /*
23143 * Exception entry point. This expects an error code/orig_rax on the stack.
23144 @@ -1575,12 +2081,13 @@ ENTRY(error_entry)
23145 movq_cfi r14, R14+8
23146 movq_cfi r15, R15+8
23147 xorl %ebx,%ebx
23148 - testl $3,CS+8(%rsp)
23149 + testb $3,CS+8(%rsp)
23150 je error_kernelspace
23151 error_swapgs:
23152 SWAPGS
23153 error_sti:
23154 TRACE_IRQS_OFF
23155 + pax_force_retaddr_bts
23156 ret
23157
23158 /*
23159 @@ -1607,7 +2114,7 @@ bstep_iret:
23160 movq %rcx,RIP+8(%rsp)
23161 jmp error_swapgs
23162 CFI_ENDPROC
23163 -END(error_entry)
23164 +ENDPROC(error_entry)
23165
23166
23167 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
23168 @@ -1618,7 +2125,7 @@ ENTRY(error_exit)
23169 DISABLE_INTERRUPTS(CLBR_NONE)
23170 TRACE_IRQS_OFF
23171 GET_THREAD_INFO(%rcx)
23172 - testl %eax,%eax
23173 + testl $1,%eax
23174 jne retint_kernel
23175 LOCKDEP_SYS_EXIT_IRQ
23176 movl TI_flags(%rcx),%edx
23177 @@ -1627,7 +2134,7 @@ ENTRY(error_exit)
23178 jnz retint_careful
23179 jmp retint_swapgs
23180 CFI_ENDPROC
23181 -END(error_exit)
23182 +ENDPROC(error_exit)
23183
23184 /*
23185 * Test if a given stack is an NMI stack or not.
23186 @@ -1685,9 +2192,11 @@ ENTRY(nmi)
23187 * If %cs was not the kernel segment, then the NMI triggered in user
23188 * space, which means it is definitely not nested.
23189 */
23190 + cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
23191 + je 1f
23192 cmpl $__KERNEL_CS, 16(%rsp)
23193 jne first_nmi
23194 -
23195 +1:
23196 /*
23197 * Check the special variable on the stack to see if NMIs are
23198 * executing.
23199 @@ -1721,8 +2230,7 @@ nested_nmi:
23200
23201 1:
23202 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
23203 - leaq -1*8(%rsp), %rdx
23204 - movq %rdx, %rsp
23205 + subq $8, %rsp
23206 CFI_ADJUST_CFA_OFFSET 1*8
23207 leaq -10*8(%rsp), %rdx
23208 pushq_cfi $__KERNEL_DS
23209 @@ -1740,6 +2248,7 @@ nested_nmi_out:
23210 CFI_RESTORE rdx
23211
23212 /* No need to check faults here */
23213 +# pax_force_retaddr_bts
23214 INTERRUPT_RETURN
23215
23216 CFI_RESTORE_STATE
23217 @@ -1852,9 +2361,11 @@ end_repeat_nmi:
23218 * NMI itself takes a page fault, the page fault that was preempted
23219 * will read the information from the NMI page fault and not the
23220 * origin fault. Save it off and restore it if it changes.
23221 - * Use the r12 callee-saved register.
23222 + * Use the r13 callee-saved register.
23223 */
23224 - movq %cr2, %r12
23225 + movq %cr2, %r13
23226 +
23227 + pax_enter_kernel_nmi
23228
23229 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
23230 movq %rsp,%rdi
23231 @@ -1863,31 +2374,36 @@ end_repeat_nmi:
23232
23233 /* Did the NMI take a page fault? Restore cr2 if it did */
23234 movq %cr2, %rcx
23235 - cmpq %rcx, %r12
23236 + cmpq %rcx, %r13
23237 je 1f
23238 - movq %r12, %cr2
23239 + movq %r13, %cr2
23240 1:
23241
23242 - testl %ebx,%ebx /* swapgs needed? */
23243 + testl $1,%ebx /* swapgs needed? */
23244 jnz nmi_restore
23245 nmi_swapgs:
23246 SWAPGS_UNSAFE_STACK
23247 nmi_restore:
23248 + pax_exit_kernel_nmi
23249 /* Pop the extra iret frame at once */
23250 RESTORE_ALL 6*8
23251 + testb $3, 8(%rsp)
23252 + jnz 1f
23253 + pax_force_retaddr_bts
23254 +1:
23255
23256 /* Clear the NMI executing stack variable */
23257 movq $0, 5*8(%rsp)
23258 jmp irq_return
23259 CFI_ENDPROC
23260 -END(nmi)
23261 +ENDPROC(nmi)
23262
23263 ENTRY(ignore_sysret)
23264 CFI_STARTPROC
23265 mov $-ENOSYS,%eax
23266 sysret
23267 CFI_ENDPROC
23268 -END(ignore_sysret)
23269 +ENDPROC(ignore_sysret)
23270
23271 /*
23272 * End of kprobes section
23273 diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
23274 index d4bdd25..912664c 100644
23275 --- a/arch/x86/kernel/ftrace.c
23276 +++ b/arch/x86/kernel/ftrace.c
23277 @@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
23278 {
23279 unsigned char replaced[MCOUNT_INSN_SIZE];
23280
23281 + ip = ktla_ktva(ip);
23282 +
23283 /*
23284 * Note: Due to modules and __init, code can
23285 * disappear and change, we need to protect against faulting
23286 @@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
23287 unsigned char old[MCOUNT_INSN_SIZE], *new;
23288 int ret;
23289
23290 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
23291 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
23292 new = ftrace_call_replace(ip, (unsigned long)func);
23293
23294 /* See comment above by declaration of modifying_ftrace_code */
23295 @@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
23296 /* Also update the regs callback function */
23297 if (!ret) {
23298 ip = (unsigned long)(&ftrace_regs_call);
23299 - memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
23300 + memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
23301 new = ftrace_call_replace(ip, (unsigned long)func);
23302 ret = ftrace_modify_code(ip, old, new);
23303 }
23304 @@ -291,7 +293,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
23305 * kernel identity mapping to modify code.
23306 */
23307 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
23308 - ip = (unsigned long)__va(__pa_symbol(ip));
23309 + ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
23310
23311 return probe_kernel_write((void *)ip, val, size);
23312 }
23313 @@ -301,7 +303,7 @@ static int add_break(unsigned long ip, const char *old)
23314 unsigned char replaced[MCOUNT_INSN_SIZE];
23315 unsigned char brk = BREAKPOINT_INSTRUCTION;
23316
23317 - if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
23318 + if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
23319 return -EFAULT;
23320
23321 /* Make sure it is what we expect it to be */
23322 @@ -649,7 +651,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
23323 return ret;
23324
23325 fail_update:
23326 - probe_kernel_write((void *)ip, &old_code[0], 1);
23327 + probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
23328 goto out;
23329 }
23330
23331 @@ -682,6 +684,8 @@ static int ftrace_mod_jmp(unsigned long ip,
23332 {
23333 unsigned char code[MCOUNT_INSN_SIZE];
23334
23335 + ip = ktla_ktva(ip);
23336 +
23337 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
23338 return -EFAULT;
23339
23340 diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
23341 index 1be8e43..d9b9ef6 100644
23342 --- a/arch/x86/kernel/head64.c
23343 +++ b/arch/x86/kernel/head64.c
23344 @@ -67,12 +67,12 @@ again:
23345 pgd = *pgd_p;
23346
23347 /*
23348 - * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
23349 - * critical -- __PAGE_OFFSET would point us back into the dynamic
23350 + * The use of __early_va rather than __va here is critical:
23351 + * __va would point us back into the dynamic
23352 * range and we might end up looping forever...
23353 */
23354 if (pgd)
23355 - pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
23356 + pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
23357 else {
23358 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
23359 reset_early_page_tables();
23360 @@ -82,13 +82,13 @@ again:
23361 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
23362 for (i = 0; i < PTRS_PER_PUD; i++)
23363 pud_p[i] = 0;
23364 - *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
23365 + *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
23366 }
23367 pud_p += pud_index(address);
23368 pud = *pud_p;
23369
23370 if (pud)
23371 - pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
23372 + pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
23373 else {
23374 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
23375 reset_early_page_tables();
23376 @@ -98,7 +98,7 @@ again:
23377 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
23378 for (i = 0; i < PTRS_PER_PMD; i++)
23379 pmd_p[i] = 0;
23380 - *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
23381 + *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
23382 }
23383 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
23384 pmd_p[pmd_index(address)] = pmd;
23385 @@ -175,7 +175,6 @@ asmlinkage void __init x86_64_start_kernel(char * real_mode_data)
23386 if (console_loglevel == 10)
23387 early_printk("Kernel alive\n");
23388
23389 - clear_page(init_level4_pgt);
23390 /* set init_level4_pgt kernel high mapping*/
23391 init_level4_pgt[511] = early_level4_pgt[511];
23392
23393 diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
23394 index 81ba276..30c5411 100644
23395 --- a/arch/x86/kernel/head_32.S
23396 +++ b/arch/x86/kernel/head_32.S
23397 @@ -26,6 +26,12 @@
23398 /* Physical address */
23399 #define pa(X) ((X) - __PAGE_OFFSET)
23400
23401 +#ifdef CONFIG_PAX_KERNEXEC
23402 +#define ta(X) (X)
23403 +#else
23404 +#define ta(X) ((X) - __PAGE_OFFSET)
23405 +#endif
23406 +
23407 /*
23408 * References to members of the new_cpu_data structure.
23409 */
23410 @@ -55,11 +61,7 @@
23411 * and small than max_low_pfn, otherwise will waste some page table entries
23412 */
23413
23414 -#if PTRS_PER_PMD > 1
23415 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
23416 -#else
23417 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
23418 -#endif
23419 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
23420
23421 /* Number of possible pages in the lowmem region */
23422 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
23423 @@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
23424 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
23425
23426 /*
23427 + * Real beginning of normal "text" segment
23428 + */
23429 +ENTRY(stext)
23430 +ENTRY(_stext)
23431 +
23432 +/*
23433 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
23434 * %esi points to the real-mode code as a 32-bit pointer.
23435 * CS and DS must be 4 GB flat segments, but we don't depend on
23436 @@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
23437 * can.
23438 */
23439 __HEAD
23440 +
23441 +#ifdef CONFIG_PAX_KERNEXEC
23442 + jmp startup_32
23443 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
23444 +.fill PAGE_SIZE-5,1,0xcc
23445 +#endif
23446 +
23447 ENTRY(startup_32)
23448 movl pa(stack_start),%ecx
23449
23450 @@ -106,6 +121,59 @@ ENTRY(startup_32)
23451 2:
23452 leal -__PAGE_OFFSET(%ecx),%esp
23453
23454 +#ifdef CONFIG_SMP
23455 + movl $pa(cpu_gdt_table),%edi
23456 + movl $__per_cpu_load,%eax
23457 + movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
23458 + rorl $16,%eax
23459 + movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
23460 + movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
23461 + movl $__per_cpu_end - 1,%eax
23462 + subl $__per_cpu_start,%eax
23463 + movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
23464 +#endif
23465 +
23466 +#ifdef CONFIG_PAX_MEMORY_UDEREF
23467 + movl $NR_CPUS,%ecx
23468 + movl $pa(cpu_gdt_table),%edi
23469 +1:
23470 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
23471 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
23472 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
23473 + addl $PAGE_SIZE_asm,%edi
23474 + loop 1b
23475 +#endif
23476 +
23477 +#ifdef CONFIG_PAX_KERNEXEC
23478 + movl $pa(boot_gdt),%edi
23479 + movl $__LOAD_PHYSICAL_ADDR,%eax
23480 + movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
23481 + rorl $16,%eax
23482 + movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
23483 + movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
23484 + rorl $16,%eax
23485 +
23486 + ljmp $(__BOOT_CS),$1f
23487 +1:
23488 +
23489 + movl $NR_CPUS,%ecx
23490 + movl $pa(cpu_gdt_table),%edi
23491 + addl $__PAGE_OFFSET,%eax
23492 +1:
23493 + movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
23494 + movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
23495 + movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
23496 + movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
23497 + rorl $16,%eax
23498 + movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
23499 + movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
23500 + movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
23501 + movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
23502 + rorl $16,%eax
23503 + addl $PAGE_SIZE_asm,%edi
23504 + loop 1b
23505 +#endif
23506 +
23507 /*
23508 * Clear BSS first so that there are no surprises...
23509 */
23510 @@ -201,8 +269,11 @@ ENTRY(startup_32)
23511 movl %eax, pa(max_pfn_mapped)
23512
23513 /* Do early initialization of the fixmap area */
23514 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
23515 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
23516 +#ifdef CONFIG_COMPAT_VDSO
23517 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
23518 +#else
23519 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
23520 +#endif
23521 #else /* Not PAE */
23522
23523 page_pde_offset = (__PAGE_OFFSET >> 20);
23524 @@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
23525 movl %eax, pa(max_pfn_mapped)
23526
23527 /* Do early initialization of the fixmap area */
23528 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
23529 - movl %eax,pa(initial_page_table+0xffc)
23530 +#ifdef CONFIG_COMPAT_VDSO
23531 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
23532 +#else
23533 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
23534 +#endif
23535 #endif
23536
23537 #ifdef CONFIG_PARAVIRT
23538 @@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
23539 cmpl $num_subarch_entries, %eax
23540 jae bad_subarch
23541
23542 - movl pa(subarch_entries)(,%eax,4), %eax
23543 - subl $__PAGE_OFFSET, %eax
23544 - jmp *%eax
23545 + jmp *pa(subarch_entries)(,%eax,4)
23546
23547 bad_subarch:
23548 WEAK(lguest_entry)
23549 @@ -261,10 +333,10 @@ WEAK(xen_entry)
23550 __INITDATA
23551
23552 subarch_entries:
23553 - .long default_entry /* normal x86/PC */
23554 - .long lguest_entry /* lguest hypervisor */
23555 - .long xen_entry /* Xen hypervisor */
23556 - .long default_entry /* Moorestown MID */
23557 + .long ta(default_entry) /* normal x86/PC */
23558 + .long ta(lguest_entry) /* lguest hypervisor */
23559 + .long ta(xen_entry) /* Xen hypervisor */
23560 + .long ta(default_entry) /* Moorestown MID */
23561 num_subarch_entries = (. - subarch_entries) / 4
23562 .previous
23563 #else
23564 @@ -354,6 +426,7 @@ default_entry:
23565 movl pa(mmu_cr4_features),%eax
23566 movl %eax,%cr4
23567
23568 +#ifdef CONFIG_X86_PAE
23569 testb $X86_CR4_PAE, %al # check if PAE is enabled
23570 jz enable_paging
23571
23572 @@ -382,6 +455,9 @@ default_entry:
23573 /* Make changes effective */
23574 wrmsr
23575
23576 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
23577 +#endif
23578 +
23579 enable_paging:
23580
23581 /*
23582 @@ -449,14 +525,20 @@ is486:
23583 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
23584 movl %eax,%ss # after changing gdt.
23585
23586 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
23587 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
23588 movl %eax,%ds
23589 movl %eax,%es
23590
23591 movl $(__KERNEL_PERCPU), %eax
23592 movl %eax,%fs # set this cpu's percpu
23593
23594 +#ifdef CONFIG_CC_STACKPROTECTOR
23595 movl $(__KERNEL_STACK_CANARY),%eax
23596 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
23597 + movl $(__USER_DS),%eax
23598 +#else
23599 + xorl %eax,%eax
23600 +#endif
23601 movl %eax,%gs
23602
23603 xorl %eax,%eax # Clear LDT
23604 @@ -512,8 +594,11 @@ setup_once:
23605 * relocation. Manually set base address in stack canary
23606 * segment descriptor.
23607 */
23608 - movl $gdt_page,%eax
23609 + movl $cpu_gdt_table,%eax
23610 movl $stack_canary,%ecx
23611 +#ifdef CONFIG_SMP
23612 + addl $__per_cpu_load,%ecx
23613 +#endif
23614 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
23615 shrl $16, %ecx
23616 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
23617 @@ -544,7 +629,7 @@ ENDPROC(early_idt_handlers)
23618 /* This is global to keep gas from relaxing the jumps */
23619 ENTRY(early_idt_handler)
23620 cld
23621 - cmpl $2,%ss:early_recursion_flag
23622 + cmpl $1,%ss:early_recursion_flag
23623 je hlt_loop
23624 incl %ss:early_recursion_flag
23625
23626 @@ -582,8 +667,8 @@ ENTRY(early_idt_handler)
23627 pushl (20+6*4)(%esp) /* trapno */
23628 pushl $fault_msg
23629 call printk
23630 -#endif
23631 call dump_stack
23632 +#endif
23633 hlt_loop:
23634 hlt
23635 jmp hlt_loop
23636 @@ -602,8 +687,11 @@ ENDPROC(early_idt_handler)
23637 /* This is the default interrupt "handler" :-) */
23638 ALIGN
23639 ignore_int:
23640 - cld
23641 #ifdef CONFIG_PRINTK
23642 + cmpl $2,%ss:early_recursion_flag
23643 + je hlt_loop
23644 + incl %ss:early_recursion_flag
23645 + cld
23646 pushl %eax
23647 pushl %ecx
23648 pushl %edx
23649 @@ -612,9 +700,6 @@ ignore_int:
23650 movl $(__KERNEL_DS),%eax
23651 movl %eax,%ds
23652 movl %eax,%es
23653 - cmpl $2,early_recursion_flag
23654 - je hlt_loop
23655 - incl early_recursion_flag
23656 pushl 16(%esp)
23657 pushl 24(%esp)
23658 pushl 32(%esp)
23659 @@ -648,29 +733,34 @@ ENTRY(setup_once_ref)
23660 /*
23661 * BSS section
23662 */
23663 -__PAGE_ALIGNED_BSS
23664 - .align PAGE_SIZE
23665 #ifdef CONFIG_X86_PAE
23666 +.section .initial_pg_pmd,"a",@progbits
23667 initial_pg_pmd:
23668 .fill 1024*KPMDS,4,0
23669 #else
23670 +.section .initial_page_table,"a",@progbits
23671 ENTRY(initial_page_table)
23672 .fill 1024,4,0
23673 #endif
23674 +.section .initial_pg_fixmap,"a",@progbits
23675 initial_pg_fixmap:
23676 .fill 1024,4,0
23677 +.section .empty_zero_page,"a",@progbits
23678 ENTRY(empty_zero_page)
23679 .fill 4096,1,0
23680 +.section .swapper_pg_dir,"a",@progbits
23681 ENTRY(swapper_pg_dir)
23682 +#ifdef CONFIG_X86_PAE
23683 + .fill 4,8,0
23684 +#else
23685 .fill 1024,4,0
23686 +#endif
23687
23688 /*
23689 * This starts the data section.
23690 */
23691 #ifdef CONFIG_X86_PAE
23692 -__PAGE_ALIGNED_DATA
23693 - /* Page-aligned for the benefit of paravirt? */
23694 - .align PAGE_SIZE
23695 +.section .initial_page_table,"a",@progbits
23696 ENTRY(initial_page_table)
23697 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
23698 # if KPMDS == 3
23699 @@ -689,12 +779,20 @@ ENTRY(initial_page_table)
23700 # error "Kernel PMDs should be 1, 2 or 3"
23701 # endif
23702 .align PAGE_SIZE /* needs to be page-sized too */
23703 +
23704 +#ifdef CONFIG_PAX_PER_CPU_PGD
23705 +ENTRY(cpu_pgd)
23706 + .rept 2*NR_CPUS
23707 + .fill 4,8,0
23708 + .endr
23709 +#endif
23710 +
23711 #endif
23712
23713 .data
23714 .balign 4
23715 ENTRY(stack_start)
23716 - .long init_thread_union+THREAD_SIZE
23717 + .long init_thread_union+THREAD_SIZE-8
23718
23719 __INITRODATA
23720 int_msg:
23721 @@ -722,7 +820,7 @@ fault_msg:
23722 * segment size, and 32-bit linear address value:
23723 */
23724
23725 - .data
23726 +.section .rodata,"a",@progbits
23727 .globl boot_gdt_descr
23728 .globl idt_descr
23729
23730 @@ -731,7 +829,7 @@ fault_msg:
23731 .word 0 # 32 bit align gdt_desc.address
23732 boot_gdt_descr:
23733 .word __BOOT_DS+7
23734 - .long boot_gdt - __PAGE_OFFSET
23735 + .long pa(boot_gdt)
23736
23737 .word 0 # 32-bit align idt_desc.address
23738 idt_descr:
23739 @@ -742,7 +840,7 @@ idt_descr:
23740 .word 0 # 32 bit align gdt_desc.address
23741 ENTRY(early_gdt_descr)
23742 .word GDT_ENTRIES*8-1
23743 - .long gdt_page /* Overwritten for secondary CPUs */
23744 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
23745
23746 /*
23747 * The boot_gdt must mirror the equivalent in setup.S and is
23748 @@ -751,5 +849,65 @@ ENTRY(early_gdt_descr)
23749 .align L1_CACHE_BYTES
23750 ENTRY(boot_gdt)
23751 .fill GDT_ENTRY_BOOT_CS,8,0
23752 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
23753 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
23754 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
23755 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
23756 +
23757 + .align PAGE_SIZE_asm
23758 +ENTRY(cpu_gdt_table)
23759 + .rept NR_CPUS
23760 + .quad 0x0000000000000000 /* NULL descriptor */
23761 + .quad 0x0000000000000000 /* 0x0b reserved */
23762 + .quad 0x0000000000000000 /* 0x13 reserved */
23763 + .quad 0x0000000000000000 /* 0x1b reserved */
23764 +
23765 +#ifdef CONFIG_PAX_KERNEXEC
23766 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
23767 +#else
23768 + .quad 0x0000000000000000 /* 0x20 unused */
23769 +#endif
23770 +
23771 + .quad 0x0000000000000000 /* 0x28 unused */
23772 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
23773 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
23774 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
23775 + .quad 0x0000000000000000 /* 0x4b reserved */
23776 + .quad 0x0000000000000000 /* 0x53 reserved */
23777 + .quad 0x0000000000000000 /* 0x5b reserved */
23778 +
23779 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
23780 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
23781 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
23782 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
23783 +
23784 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
23785 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
23786 +
23787 + /*
23788 + * Segments used for calling PnP BIOS have byte granularity.
23789 + * The code segments and data segments have fixed 64k limits,
23790 + * the transfer segment sizes are set at run time.
23791 + */
23792 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
23793 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
23794 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
23795 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
23796 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
23797 +
23798 + /*
23799 + * The APM segments have byte granularity and their bases
23800 + * are set at run time. All have 64k limits.
23801 + */
23802 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
23803 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
23804 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
23805 +
23806 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
23807 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
23808 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
23809 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
23810 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
23811 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
23812 +
23813 + /* Be sure this is zeroed to avoid false validations in Xen */
23814 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
23815 + .endr
23816 diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
23817 index e1aabdb..fee4fee 100644
23818 --- a/arch/x86/kernel/head_64.S
23819 +++ b/arch/x86/kernel/head_64.S
23820 @@ -20,6 +20,8 @@
23821 #include <asm/processor-flags.h>
23822 #include <asm/percpu.h>
23823 #include <asm/nops.h>
23824 +#include <asm/cpufeature.h>
23825 +#include <asm/alternative-asm.h>
23826
23827 #ifdef CONFIG_PARAVIRT
23828 #include <asm/asm-offsets.h>
23829 @@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
23830 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
23831 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
23832 L3_START_KERNEL = pud_index(__START_KERNEL_map)
23833 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
23834 +L3_VMALLOC_START = pud_index(VMALLOC_START)
23835 +L4_VMALLOC_END = pgd_index(VMALLOC_END)
23836 +L3_VMALLOC_END = pud_index(VMALLOC_END)
23837 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
23838 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
23839
23840 .text
23841 __HEAD
23842 @@ -89,11 +97,24 @@ startup_64:
23843 * Fixup the physical addresses in the page table
23844 */
23845 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
23846 + addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
23847 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
23848 + addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
23849 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
23850 + addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
23851
23852 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
23853 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
23854 + addq %rbp, level3_ident_pgt + (0*8)(%rip)
23855 +#ifndef CONFIG_XEN
23856 + addq %rbp, level3_ident_pgt + (1*8)(%rip)
23857 +#endif
23858 +
23859 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
23860 +
23861 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
23862 + addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
23863
23864 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
23865 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
23866
23867 /*
23868 * Set up the identity mapping for the switchover. These
23869 @@ -177,8 +198,8 @@ ENTRY(secondary_startup_64)
23870 movq $(init_level4_pgt - __START_KERNEL_map), %rax
23871 1:
23872
23873 - /* Enable PAE mode and PGE */
23874 - movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
23875 + /* Enable PAE mode and PSE/PGE */
23876 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
23877 movq %rcx, %cr4
23878
23879 /* Setup early boot stage 4 level pagetables. */
23880 @@ -199,10 +220,19 @@ ENTRY(secondary_startup_64)
23881 movl $MSR_EFER, %ecx
23882 rdmsr
23883 btsl $_EFER_SCE, %eax /* Enable System Call */
23884 - btl $20,%edi /* No Execute supported? */
23885 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
23886 jnc 1f
23887 btsl $_EFER_NX, %eax
23888 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
23889 +#ifndef CONFIG_EFI
23890 + btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_PAGE_OFFSET(%rip)
23891 +#endif
23892 + btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_START(%rip)
23893 + btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMALLOC_END(%rip)
23894 + btsq $_PAGE_BIT_NX, init_level4_pgt + 8*L4_VMEMMAP_START(%rip)
23895 + btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*506(%rip)
23896 + btsq $_PAGE_BIT_NX, level2_fixmap_pgt + 8*507(%rip)
23897 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
23898 1: wrmsr /* Make changes effective */
23899
23900 /* Setup cr0 */
23901 @@ -282,6 +312,7 @@ ENTRY(secondary_startup_64)
23902 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
23903 * address given in m16:64.
23904 */
23905 + pax_set_fptr_mask
23906 movq initial_code(%rip),%rax
23907 pushq $0 # fake return address to stop unwinder
23908 pushq $__KERNEL_CS # set correct cs
23909 @@ -388,7 +419,7 @@ ENTRY(early_idt_handler)
23910 call dump_stack
23911 #ifdef CONFIG_KALLSYMS
23912 leaq early_idt_ripmsg(%rip),%rdi
23913 - movq 40(%rsp),%rsi # %rip again
23914 + movq 88(%rsp),%rsi # %rip again
23915 call __print_symbol
23916 #endif
23917 #endif /* EARLY_PRINTK */
23918 @@ -416,6 +447,7 @@ ENDPROC(early_idt_handler)
23919 early_recursion_flag:
23920 .long 0
23921
23922 + .section .rodata,"a",@progbits
23923 #ifdef CONFIG_EARLY_PRINTK
23924 early_idt_msg:
23925 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
23926 @@ -443,29 +475,52 @@ NEXT_PAGE(early_level4_pgt)
23927 NEXT_PAGE(early_dynamic_pgts)
23928 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
23929
23930 - .data
23931 + .section .rodata,"a",@progbits
23932
23933 -#ifndef CONFIG_XEN
23934 NEXT_PAGE(init_level4_pgt)
23935 - .fill 512,8,0
23936 -#else
23937 -NEXT_PAGE(init_level4_pgt)
23938 - .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
23939 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
23940 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
23941 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
23942 + .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
23943 + .org init_level4_pgt + L4_VMALLOC_END*8, 0
23944 + .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
23945 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
23946 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
23947 .org init_level4_pgt + L4_START_KERNEL*8, 0
23948 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
23949 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
23950
23951 +#ifdef CONFIG_PAX_PER_CPU_PGD
23952 +NEXT_PAGE(cpu_pgd)
23953 + .rept 2*NR_CPUS
23954 + .fill 512,8,0
23955 + .endr
23956 +#endif
23957 +
23958 NEXT_PAGE(level3_ident_pgt)
23959 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
23960 +#ifdef CONFIG_XEN
23961 .fill 511, 8, 0
23962 +#else
23963 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
23964 + .fill 510,8,0
23965 +#endif
23966 +
23967 +NEXT_PAGE(level3_vmalloc_start_pgt)
23968 + .fill 512,8,0
23969 +
23970 +NEXT_PAGE(level3_vmalloc_end_pgt)
23971 + .fill 512,8,0
23972 +
23973 +NEXT_PAGE(level3_vmemmap_pgt)
23974 + .fill L3_VMEMMAP_START,8,0
23975 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
23976 +
23977 NEXT_PAGE(level2_ident_pgt)
23978 - /* Since I easily can, map the first 1G.
23979 + /* Since I easily can, map the first 2G.
23980 * Don't set NX because code runs from these pages.
23981 */
23982 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
23983 -#endif
23984 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
23985
23986 NEXT_PAGE(level3_kernel_pgt)
23987 .fill L3_START_KERNEL,8,0
23988 @@ -473,6 +528,9 @@ NEXT_PAGE(level3_kernel_pgt)
23989 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
23990 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
23991
23992 +NEXT_PAGE(level2_vmemmap_pgt)
23993 + .fill 512,8,0
23994 +
23995 NEXT_PAGE(level2_kernel_pgt)
23996 /*
23997 * 512 MB kernel mapping. We spend a full page on this pagetable
23998 @@ -490,28 +548,64 @@ NEXT_PAGE(level2_kernel_pgt)
23999 NEXT_PAGE(level2_fixmap_pgt)
24000 .fill 506,8,0
24001 .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
24002 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
24003 - .fill 5,8,0
24004 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
24005 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
24006 + .fill 4,8,0
24007
24008 NEXT_PAGE(level1_fixmap_pgt)
24009 .fill 512,8,0
24010
24011 +NEXT_PAGE(level1_vsyscall_pgt)
24012 + .fill 512,8,0
24013 +
24014 #undef PMDS
24015
24016 - .data
24017 + .align PAGE_SIZE
24018 +ENTRY(cpu_gdt_table)
24019 + .rept NR_CPUS
24020 + .quad 0x0000000000000000 /* NULL descriptor */
24021 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
24022 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
24023 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
24024 + .quad 0x00cffb000000ffff /* __USER32_CS */
24025 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
24026 + .quad 0x00affb000000ffff /* __USER_CS */
24027 +
24028 +#ifdef CONFIG_PAX_KERNEXEC
24029 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
24030 +#else
24031 + .quad 0x0 /* unused */
24032 +#endif
24033 +
24034 + .quad 0,0 /* TSS */
24035 + .quad 0,0 /* LDT */
24036 + .quad 0,0,0 /* three TLS descriptors */
24037 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
24038 + /* asm/segment.h:GDT_ENTRIES must match this */
24039 +
24040 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24041 + .quad 0x00cf93000000ffff /* __UDEREF_KERNEL_DS */
24042 +#else
24043 + .quad 0x0 /* unused */
24044 +#endif
24045 +
24046 + /* zero the remaining page */
24047 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
24048 + .endr
24049 +
24050 .align 16
24051 .globl early_gdt_descr
24052 early_gdt_descr:
24053 .word GDT_ENTRIES*8-1
24054 early_gdt_descr_base:
24055 - .quad INIT_PER_CPU_VAR(gdt_page)
24056 + .quad cpu_gdt_table
24057
24058 ENTRY(phys_base)
24059 /* This must match the first entry in level2_kernel_pgt */
24060 .quad 0x0000000000000000
24061
24062 #include "../../x86/xen/xen-head.S"
24063 -
24064 - __PAGE_ALIGNED_BSS
24065 +
24066 + .section .rodata,"a",@progbits
24067 NEXT_PAGE(empty_zero_page)
24068 .skip PAGE_SIZE
24069 diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
24070 index 0fa6912..b37438b 100644
24071 --- a/arch/x86/kernel/i386_ksyms_32.c
24072 +++ b/arch/x86/kernel/i386_ksyms_32.c
24073 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
24074 EXPORT_SYMBOL(cmpxchg8b_emu);
24075 #endif
24076
24077 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
24078 +
24079 /* Networking helper routines. */
24080 EXPORT_SYMBOL(csum_partial_copy_generic);
24081 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
24082 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
24083
24084 EXPORT_SYMBOL(__get_user_1);
24085 EXPORT_SYMBOL(__get_user_2);
24086 @@ -37,3 +41,11 @@ EXPORT_SYMBOL(strstr);
24087
24088 EXPORT_SYMBOL(csum_partial);
24089 EXPORT_SYMBOL(empty_zero_page);
24090 +
24091 +#ifdef CONFIG_PAX_KERNEXEC
24092 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
24093 +#endif
24094 +
24095 +#ifdef CONFIG_PAX_PER_CPU_PGD
24096 +EXPORT_SYMBOL(cpu_pgd);
24097 +#endif
24098 diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
24099 index 5d576ab..1403a03 100644
24100 --- a/arch/x86/kernel/i387.c
24101 +++ b/arch/x86/kernel/i387.c
24102 @@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
24103 static inline bool interrupted_user_mode(void)
24104 {
24105 struct pt_regs *regs = get_irq_regs();
24106 - return regs && user_mode_vm(regs);
24107 + return regs && user_mode(regs);
24108 }
24109
24110 /*
24111 diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
24112 index 9a5c460..84868423 100644
24113 --- a/arch/x86/kernel/i8259.c
24114 +++ b/arch/x86/kernel/i8259.c
24115 @@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
24116 static void make_8259A_irq(unsigned int irq)
24117 {
24118 disable_irq_nosync(irq);
24119 - io_apic_irqs &= ~(1<<irq);
24120 + io_apic_irqs &= ~(1UL<<irq);
24121 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
24122 i8259A_chip.name);
24123 enable_irq(irq);
24124 @@ -209,7 +209,7 @@ spurious_8259A_irq:
24125 "spurious 8259A interrupt: IRQ%d.\n", irq);
24126 spurious_irq_mask |= irqmask;
24127 }
24128 - atomic_inc(&irq_err_count);
24129 + atomic_inc_unchecked(&irq_err_count);
24130 /*
24131 * Theoretically we do not have to handle this IRQ,
24132 * but in Linux this does not cause problems and is
24133 @@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
24134 /* (slave's support for AEOI in flat mode is to be investigated) */
24135 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
24136
24137 + pax_open_kernel();
24138 if (auto_eoi)
24139 /*
24140 * In AEOI mode we just have to mask the interrupt
24141 * when acking.
24142 */
24143 - i8259A_chip.irq_mask_ack = disable_8259A_irq;
24144 + *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
24145 else
24146 - i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
24147 + *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
24148 + pax_close_kernel();
24149
24150 udelay(100); /* wait for 8259A to initialize */
24151
24152 diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
24153 index a979b5b..1d6db75 100644
24154 --- a/arch/x86/kernel/io_delay.c
24155 +++ b/arch/x86/kernel/io_delay.c
24156 @@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
24157 * Quirk table for systems that misbehave (lock up, etc.) if port
24158 * 0x80 is used:
24159 */
24160 -static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
24161 +static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
24162 {
24163 .callback = dmi_io_delay_0xed_port,
24164 .ident = "Compaq Presario V6000",
24165 diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
24166 index 4ddaf66..49d5c18 100644
24167 --- a/arch/x86/kernel/ioport.c
24168 +++ b/arch/x86/kernel/ioport.c
24169 @@ -6,6 +6,7 @@
24170 #include <linux/sched.h>
24171 #include <linux/kernel.h>
24172 #include <linux/capability.h>
24173 +#include <linux/security.h>
24174 #include <linux/errno.h>
24175 #include <linux/types.h>
24176 #include <linux/ioport.h>
24177 @@ -30,6 +31,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
24178 return -EINVAL;
24179 if (turn_on && !capable(CAP_SYS_RAWIO))
24180 return -EPERM;
24181 +#ifdef CONFIG_GRKERNSEC_IO
24182 + if (turn_on && grsec_disable_privio) {
24183 + gr_handle_ioperm();
24184 + return -ENODEV;
24185 + }
24186 +#endif
24187
24188 /*
24189 * If it's the first ioperm() call in this thread's lifetime, set the
24190 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
24191 * because the ->io_bitmap_max value must match the bitmap
24192 * contents:
24193 */
24194 - tss = &per_cpu(init_tss, get_cpu());
24195 + tss = init_tss + get_cpu();
24196
24197 if (turn_on)
24198 bitmap_clear(t->io_bitmap_ptr, from, num);
24199 @@ -105,6 +112,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
24200 if (level > old) {
24201 if (!capable(CAP_SYS_RAWIO))
24202 return -EPERM;
24203 +#ifdef CONFIG_GRKERNSEC_IO
24204 + if (grsec_disable_privio) {
24205 + gr_handle_iopl();
24206 + return -ENODEV;
24207 + }
24208 +#endif
24209 }
24210 regs->flags = (regs->flags & ~X86_EFLAGS_IOPL) | (level << 12);
24211 t->iopl = level << 12;
24212 diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
24213 index 22d0687..e07b2a5 100644
24214 --- a/arch/x86/kernel/irq.c
24215 +++ b/arch/x86/kernel/irq.c
24216 @@ -21,7 +21,7 @@
24217 #define CREATE_TRACE_POINTS
24218 #include <asm/trace/irq_vectors.h>
24219
24220 -atomic_t irq_err_count;
24221 +atomic_unchecked_t irq_err_count;
24222
24223 /* Function pointer for generic interrupt vector handling */
24224 void (*x86_platform_ipi_callback)(void) = NULL;
24225 @@ -125,9 +125,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
24226 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
24227 seq_printf(p, " Machine check polls\n");
24228 #endif
24229 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
24230 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
24231 #if defined(CONFIG_X86_IO_APIC)
24232 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
24233 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
24234 #endif
24235 return 0;
24236 }
24237 @@ -167,7 +167,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
24238
24239 u64 arch_irq_stat(void)
24240 {
24241 - u64 sum = atomic_read(&irq_err_count);
24242 + u64 sum = atomic_read_unchecked(&irq_err_count);
24243 return sum;
24244 }
24245
24246 diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
24247 index 4186755..784efa0 100644
24248 --- a/arch/x86/kernel/irq_32.c
24249 +++ b/arch/x86/kernel/irq_32.c
24250 @@ -39,7 +39,7 @@ static int check_stack_overflow(void)
24251 __asm__ __volatile__("andl %%esp,%0" :
24252 "=r" (sp) : "0" (THREAD_SIZE - 1));
24253
24254 - return sp < (sizeof(struct thread_info) + STACK_WARN);
24255 + return sp < STACK_WARN;
24256 }
24257
24258 static void print_stack_overflow(void)
24259 @@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
24260 * per-CPU IRQ handling contexts (thread information and stack)
24261 */
24262 union irq_ctx {
24263 - struct thread_info tinfo;
24264 - u32 stack[THREAD_SIZE/sizeof(u32)];
24265 + unsigned long previous_esp;
24266 + u32 stack[THREAD_SIZE/sizeof(u32)];
24267 } __attribute__((aligned(THREAD_SIZE)));
24268
24269 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
24270 @@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
24271 static inline int
24272 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24273 {
24274 - union irq_ctx *curctx, *irqctx;
24275 + union irq_ctx *irqctx;
24276 u32 *isp, arg1, arg2;
24277
24278 - curctx = (union irq_ctx *) current_thread_info();
24279 irqctx = __this_cpu_read(hardirq_ctx);
24280
24281 /*
24282 @@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24283 * handler) we can't do that and just have to keep using the
24284 * current stack (which is the irq stack already after all)
24285 */
24286 - if (unlikely(curctx == irqctx))
24287 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
24288 return 0;
24289
24290 /* build the stack frame on the IRQ stack */
24291 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
24292 - irqctx->tinfo.task = curctx->tinfo.task;
24293 - irqctx->tinfo.previous_esp = current_stack_pointer;
24294 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
24295 + irqctx->previous_esp = current_stack_pointer;
24296
24297 - /* Copy the preempt_count so that the [soft]irq checks work. */
24298 - irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
24299 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24300 + __set_fs(MAKE_MM_SEG(0));
24301 +#endif
24302
24303 if (unlikely(overflow))
24304 call_on_stack(print_stack_overflow, isp);
24305 @@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24306 : "0" (irq), "1" (desc), "2" (isp),
24307 "D" (desc->handle_irq)
24308 : "memory", "cc", "ecx");
24309 +
24310 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24311 + __set_fs(current_thread_info()->addr_limit);
24312 +#endif
24313 +
24314 return 1;
24315 }
24316
24317 @@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
24318 */
24319 void irq_ctx_init(int cpu)
24320 {
24321 - union irq_ctx *irqctx;
24322 -
24323 if (per_cpu(hardirq_ctx, cpu))
24324 return;
24325
24326 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
24327 - THREADINFO_GFP,
24328 - THREAD_SIZE_ORDER));
24329 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
24330 - irqctx->tinfo.cpu = cpu;
24331 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
24332 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
24333 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
24334 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
24335
24336 - per_cpu(hardirq_ctx, cpu) = irqctx;
24337 -
24338 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
24339 - THREADINFO_GFP,
24340 - THREAD_SIZE_ORDER));
24341 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
24342 - irqctx->tinfo.cpu = cpu;
24343 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
24344 -
24345 - per_cpu(softirq_ctx, cpu) = irqctx;
24346 + printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
24347 + cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
24348
24349 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
24350 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
24351 @@ -152,7 +141,6 @@ void irq_ctx_init(int cpu)
24352 asmlinkage void do_softirq(void)
24353 {
24354 unsigned long flags;
24355 - struct thread_info *curctx;
24356 union irq_ctx *irqctx;
24357 u32 *isp;
24358
24359 @@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
24360 local_irq_save(flags);
24361
24362 if (local_softirq_pending()) {
24363 - curctx = current_thread_info();
24364 irqctx = __this_cpu_read(softirq_ctx);
24365 - irqctx->tinfo.task = curctx->task;
24366 - irqctx->tinfo.previous_esp = current_stack_pointer;
24367 + irqctx->previous_esp = current_stack_pointer;
24368
24369 /* build the stack frame on the softirq stack */
24370 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
24371 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
24372 +
24373 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24374 + __set_fs(MAKE_MM_SEG(0));
24375 +#endif
24376
24377 call_on_stack(__do_softirq, isp);
24378 +
24379 +#ifdef CONFIG_PAX_MEMORY_UDEREF
24380 + __set_fs(current_thread_info()->addr_limit);
24381 +#endif
24382 +
24383 /*
24384 * Shouldn't happen, we returned above if in_interrupt():
24385 */
24386 @@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
24387 if (unlikely(!desc))
24388 return false;
24389
24390 - if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
24391 + if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
24392 if (unlikely(overflow))
24393 print_stack_overflow();
24394 desc->handle_irq(irq, desc);
24395 diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
24396 index d04d3ec..ea4b374 100644
24397 --- a/arch/x86/kernel/irq_64.c
24398 +++ b/arch/x86/kernel/irq_64.c
24399 @@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
24400 u64 estack_top, estack_bottom;
24401 u64 curbase = (u64)task_stack_page(current);
24402
24403 - if (user_mode_vm(regs))
24404 + if (user_mode(regs))
24405 return;
24406
24407 if (regs->sp >= curbase + sizeof(struct thread_info) +
24408 diff --git a/arch/x86/kernel/jump_label.c b/arch/x86/kernel/jump_label.c
24409 index ee11b7d..4df4d0c 100644
24410 --- a/arch/x86/kernel/jump_label.c
24411 +++ b/arch/x86/kernel/jump_label.c
24412 @@ -49,7 +49,7 @@ static void __jump_label_transform(struct jump_entry *entry,
24413 * We are enabling this jump label. If it is not a nop
24414 * then something must have gone wrong.
24415 */
24416 - if (unlikely(memcmp((void *)entry->code, ideal_nop, 5) != 0))
24417 + if (unlikely(memcmp((void *)ktla_ktva(entry->code), ideal_nop, 5) != 0))
24418 bug_at((void *)entry->code, __LINE__);
24419
24420 code.jump = 0xe9;
24421 @@ -64,13 +64,13 @@ static void __jump_label_transform(struct jump_entry *entry,
24422 */
24423 if (init) {
24424 const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
24425 - if (unlikely(memcmp((void *)entry->code, default_nop, 5) != 0))
24426 + if (unlikely(memcmp((void *)ktla_ktva(entry->code), default_nop, 5) != 0))
24427 bug_at((void *)entry->code, __LINE__);
24428 } else {
24429 code.jump = 0xe9;
24430 code.offset = entry->target -
24431 (entry->code + JUMP_LABEL_NOP_SIZE);
24432 - if (unlikely(memcmp((void *)entry->code, &code, 5) != 0))
24433 + if (unlikely(memcmp((void *)ktla_ktva(entry->code), &code, 5) != 0))
24434 bug_at((void *)entry->code, __LINE__);
24435 }
24436 memcpy(&code, ideal_nops[NOP_ATOMIC5], JUMP_LABEL_NOP_SIZE);
24437 diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
24438 index 836f832..a8bda67 100644
24439 --- a/arch/x86/kernel/kgdb.c
24440 +++ b/arch/x86/kernel/kgdb.c
24441 @@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
24442 #ifdef CONFIG_X86_32
24443 switch (regno) {
24444 case GDB_SS:
24445 - if (!user_mode_vm(regs))
24446 + if (!user_mode(regs))
24447 *(unsigned long *)mem = __KERNEL_DS;
24448 break;
24449 case GDB_SP:
24450 - if (!user_mode_vm(regs))
24451 + if (!user_mode(regs))
24452 *(unsigned long *)mem = kernel_stack_pointer(regs);
24453 break;
24454 case GDB_GS:
24455 @@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
24456 bp->attr.bp_addr = breakinfo[breakno].addr;
24457 bp->attr.bp_len = breakinfo[breakno].len;
24458 bp->attr.bp_type = breakinfo[breakno].type;
24459 - info->address = breakinfo[breakno].addr;
24460 + if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
24461 + info->address = ktla_ktva(breakinfo[breakno].addr);
24462 + else
24463 + info->address = breakinfo[breakno].addr;
24464 info->len = breakinfo[breakno].len;
24465 info->type = breakinfo[breakno].type;
24466 val = arch_install_hw_breakpoint(bp);
24467 @@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
24468 case 'k':
24469 /* clear the trace bit */
24470 linux_regs->flags &= ~X86_EFLAGS_TF;
24471 - atomic_set(&kgdb_cpu_doing_single_step, -1);
24472 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
24473
24474 /* set the trace bit if we're stepping */
24475 if (remcomInBuffer[0] == 's') {
24476 linux_regs->flags |= X86_EFLAGS_TF;
24477 - atomic_set(&kgdb_cpu_doing_single_step,
24478 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
24479 raw_smp_processor_id());
24480 }
24481
24482 @@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
24483
24484 switch (cmd) {
24485 case DIE_DEBUG:
24486 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
24487 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
24488 if (user_mode(regs))
24489 return single_step_cont(regs, args);
24490 break;
24491 @@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
24492 #endif /* CONFIG_DEBUG_RODATA */
24493
24494 bpt->type = BP_BREAKPOINT;
24495 - err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
24496 + err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
24497 BREAK_INSTR_SIZE);
24498 if (err)
24499 return err;
24500 - err = probe_kernel_write((char *)bpt->bpt_addr,
24501 + err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
24502 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
24503 #ifdef CONFIG_DEBUG_RODATA
24504 if (!err)
24505 @@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
24506 return -EBUSY;
24507 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
24508 BREAK_INSTR_SIZE);
24509 - err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
24510 + err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
24511 if (err)
24512 return err;
24513 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
24514 @@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
24515 if (mutex_is_locked(&text_mutex))
24516 goto knl_write;
24517 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
24518 - err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
24519 + err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
24520 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
24521 goto knl_write;
24522 return err;
24523 knl_write:
24524 #endif /* CONFIG_DEBUG_RODATA */
24525 - return probe_kernel_write((char *)bpt->bpt_addr,
24526 + return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
24527 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
24528 }
24529
24530 diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
24531 index 79a3f96..6ba030a 100644
24532 --- a/arch/x86/kernel/kprobes/core.c
24533 +++ b/arch/x86/kernel/kprobes/core.c
24534 @@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
24535 s32 raddr;
24536 } __packed *insn;
24537
24538 - insn = (struct __arch_relative_insn *)from;
24539 + insn = (struct __arch_relative_insn *)ktla_ktva(from);
24540 +
24541 + pax_open_kernel();
24542 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
24543 insn->op = op;
24544 + pax_close_kernel();
24545 }
24546
24547 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
24548 @@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
24549 kprobe_opcode_t opcode;
24550 kprobe_opcode_t *orig_opcodes = opcodes;
24551
24552 - if (search_exception_tables((unsigned long)opcodes))
24553 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
24554 return 0; /* Page fault may occur on this address. */
24555
24556 retry:
24557 @@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
24558 * for the first byte, we can recover the original instruction
24559 * from it and kp->opcode.
24560 */
24561 - memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
24562 + memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
24563 buf[0] = kp->opcode;
24564 - return (unsigned long)buf;
24565 + return ktva_ktla((unsigned long)buf);
24566 }
24567
24568 /*
24569 @@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
24570 /* Another subsystem puts a breakpoint, failed to recover */
24571 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
24572 return 0;
24573 + pax_open_kernel();
24574 memcpy(dest, insn.kaddr, insn.length);
24575 + pax_close_kernel();
24576
24577 #ifdef CONFIG_X86_64
24578 if (insn_rip_relative(&insn)) {
24579 @@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
24580 return 0;
24581 }
24582 disp = (u8 *) dest + insn_offset_displacement(&insn);
24583 + pax_open_kernel();
24584 *(s32 *) disp = (s32) newdisp;
24585 + pax_close_kernel();
24586 }
24587 #endif
24588 return insn.length;
24589 @@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
24590 * nor set current_kprobe, because it doesn't use single
24591 * stepping.
24592 */
24593 - regs->ip = (unsigned long)p->ainsn.insn;
24594 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
24595 preempt_enable_no_resched();
24596 return;
24597 }
24598 @@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
24599 regs->flags &= ~X86_EFLAGS_IF;
24600 /* single step inline if the instruction is an int3 */
24601 if (p->opcode == BREAKPOINT_INSTRUCTION)
24602 - regs->ip = (unsigned long)p->addr;
24603 + regs->ip = ktla_ktva((unsigned long)p->addr);
24604 else
24605 - regs->ip = (unsigned long)p->ainsn.insn;
24606 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
24607 }
24608
24609 /*
24610 @@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
24611 setup_singlestep(p, regs, kcb, 0);
24612 return 1;
24613 }
24614 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
24615 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
24616 /*
24617 * The breakpoint instruction was removed right
24618 * after we hit it. Another cpu has removed
24619 @@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
24620 " movq %rax, 152(%rsp)\n"
24621 RESTORE_REGS_STRING
24622 " popfq\n"
24623 +#ifdef KERNEXEC_PLUGIN
24624 + " btsq $63,(%rsp)\n"
24625 +#endif
24626 #else
24627 " pushf\n"
24628 SAVE_REGS_STRING
24629 @@ -779,7 +789,7 @@ static void __kprobes
24630 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
24631 {
24632 unsigned long *tos = stack_addr(regs);
24633 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
24634 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
24635 unsigned long orig_ip = (unsigned long)p->addr;
24636 kprobe_opcode_t *insn = p->ainsn.insn;
24637
24638 @@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
24639 struct die_args *args = data;
24640 int ret = NOTIFY_DONE;
24641
24642 - if (args->regs && user_mode_vm(args->regs))
24643 + if (args->regs && user_mode(args->regs))
24644 return ret;
24645
24646 switch (val) {
24647 diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
24648 index 898160b..758cde8 100644
24649 --- a/arch/x86/kernel/kprobes/opt.c
24650 +++ b/arch/x86/kernel/kprobes/opt.c
24651 @@ -79,6 +79,7 @@ found:
24652 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
24653 static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
24654 {
24655 + pax_open_kernel();
24656 #ifdef CONFIG_X86_64
24657 *addr++ = 0x48;
24658 *addr++ = 0xbf;
24659 @@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
24660 *addr++ = 0xb8;
24661 #endif
24662 *(unsigned long *)addr = val;
24663 + pax_close_kernel();
24664 }
24665
24666 asm (
24667 @@ -335,7 +337,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
24668 * Verify if the address gap is in 2GB range, because this uses
24669 * a relative jump.
24670 */
24671 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
24672 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
24673 if (abs(rel) > 0x7fffffff)
24674 return -ERANGE;
24675
24676 @@ -350,16 +352,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
24677 op->optinsn.size = ret;
24678
24679 /* Copy arch-dep-instance from template */
24680 - memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
24681 + pax_open_kernel();
24682 + memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
24683 + pax_close_kernel();
24684
24685 /* Set probe information */
24686 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
24687
24688 /* Set probe function call */
24689 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
24690 + synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
24691
24692 /* Set returning jmp instruction at the tail of out-of-line buffer */
24693 - synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
24694 + synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
24695 (u8 *)op->kp.addr + op->optinsn.size);
24696
24697 flush_icache_range((unsigned long) buf,
24698 @@ -384,7 +388,7 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist)
24699 WARN_ON(kprobe_disabled(&op->kp));
24700
24701 /* Backup instructions which will be replaced by jump address */
24702 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
24703 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
24704 RELATIVE_ADDR_SIZE);
24705
24706 insn_buf[0] = RELATIVEJUMP_OPCODE;
24707 @@ -433,7 +437,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
24708 /* This kprobe is really able to run optimized path. */
24709 op = container_of(p, struct optimized_kprobe, kp);
24710 /* Detour through copied instructions */
24711 - regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
24712 + regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
24713 if (!reenter)
24714 reset_current_kprobe();
24715 preempt_enable_no_resched();
24716 diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
24717 index ebc9873..1b9724b 100644
24718 --- a/arch/x86/kernel/ldt.c
24719 +++ b/arch/x86/kernel/ldt.c
24720 @@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
24721 if (reload) {
24722 #ifdef CONFIG_SMP
24723 preempt_disable();
24724 - load_LDT(pc);
24725 + load_LDT_nolock(pc);
24726 if (!cpumask_equal(mm_cpumask(current->mm),
24727 cpumask_of(smp_processor_id())))
24728 smp_call_function(flush_ldt, current->mm, 1);
24729 preempt_enable();
24730 #else
24731 - load_LDT(pc);
24732 + load_LDT_nolock(pc);
24733 #endif
24734 }
24735 if (oldsize) {
24736 @@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
24737 return err;
24738
24739 for (i = 0; i < old->size; i++)
24740 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
24741 + write_ldt_entry(new->ldt, i, old->ldt + i);
24742 return 0;
24743 }
24744
24745 @@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
24746 retval = copy_ldt(&mm->context, &old_mm->context);
24747 mutex_unlock(&old_mm->context.lock);
24748 }
24749 +
24750 + if (tsk == current) {
24751 + mm->context.vdso = 0;
24752 +
24753 +#ifdef CONFIG_X86_32
24754 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24755 + mm->context.user_cs_base = 0UL;
24756 + mm->context.user_cs_limit = ~0UL;
24757 +
24758 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
24759 + cpus_clear(mm->context.cpu_user_cs_mask);
24760 +#endif
24761 +
24762 +#endif
24763 +#endif
24764 +
24765 + }
24766 +
24767 return retval;
24768 }
24769
24770 @@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
24771 }
24772 }
24773
24774 +#ifdef CONFIG_PAX_SEGMEXEC
24775 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
24776 + error = -EINVAL;
24777 + goto out_unlock;
24778 + }
24779 +#endif
24780 +
24781 fill_ldt(&ldt, &ldt_info);
24782 if (oldmode)
24783 ldt.avl = 0;
24784 diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
24785 index 5b19e4d..6476a76 100644
24786 --- a/arch/x86/kernel/machine_kexec_32.c
24787 +++ b/arch/x86/kernel/machine_kexec_32.c
24788 @@ -26,7 +26,7 @@
24789 #include <asm/cacheflush.h>
24790 #include <asm/debugreg.h>
24791
24792 -static void set_idt(void *newidt, __u16 limit)
24793 +static void set_idt(struct desc_struct *newidt, __u16 limit)
24794 {
24795 struct desc_ptr curidt;
24796
24797 @@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
24798 }
24799
24800
24801 -static void set_gdt(void *newgdt, __u16 limit)
24802 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
24803 {
24804 struct desc_ptr curgdt;
24805
24806 @@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
24807 }
24808
24809 control_page = page_address(image->control_code_page);
24810 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
24811 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
24812
24813 relocate_kernel_ptr = control_page;
24814 page_list[PA_CONTROL_PAGE] = __pa(control_page);
24815 diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
24816 index 15c9876..0a43909 100644
24817 --- a/arch/x86/kernel/microcode_core.c
24818 +++ b/arch/x86/kernel/microcode_core.c
24819 @@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
24820 return NOTIFY_OK;
24821 }
24822
24823 -static struct notifier_block __refdata mc_cpu_notifier = {
24824 +static struct notifier_block mc_cpu_notifier = {
24825 .notifier_call = mc_cpu_callback,
24826 };
24827
24828 diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
24829 index 5fb2ceb..3ae90bb 100644
24830 --- a/arch/x86/kernel/microcode_intel.c
24831 +++ b/arch/x86/kernel/microcode_intel.c
24832 @@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
24833
24834 static int get_ucode_user(void *to, const void *from, size_t n)
24835 {
24836 - return copy_from_user(to, from, n);
24837 + return copy_from_user(to, (const void __force_user *)from, n);
24838 }
24839
24840 static enum ucode_state
24841 request_microcode_user(int cpu, const void __user *buf, size_t size)
24842 {
24843 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
24844 + return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
24845 }
24846
24847 static void microcode_fini_cpu(int cpu)
24848 diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
24849 index 216a4d7..228255a 100644
24850 --- a/arch/x86/kernel/module.c
24851 +++ b/arch/x86/kernel/module.c
24852 @@ -43,15 +43,60 @@ do { \
24853 } while (0)
24854 #endif
24855
24856 -void *module_alloc(unsigned long size)
24857 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
24858 {
24859 - if (PAGE_ALIGN(size) > MODULES_LEN)
24860 + if (!size || PAGE_ALIGN(size) > MODULES_LEN)
24861 return NULL;
24862 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
24863 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
24864 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
24865 -1, __builtin_return_address(0));
24866 }
24867
24868 +void *module_alloc(unsigned long size)
24869 +{
24870 +
24871 +#ifdef CONFIG_PAX_KERNEXEC
24872 + return __module_alloc(size, PAGE_KERNEL);
24873 +#else
24874 + return __module_alloc(size, PAGE_KERNEL_EXEC);
24875 +#endif
24876 +
24877 +}
24878 +
24879 +#ifdef CONFIG_PAX_KERNEXEC
24880 +#ifdef CONFIG_X86_32
24881 +void *module_alloc_exec(unsigned long size)
24882 +{
24883 + struct vm_struct *area;
24884 +
24885 + if (size == 0)
24886 + return NULL;
24887 +
24888 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
24889 + return area ? area->addr : NULL;
24890 +}
24891 +EXPORT_SYMBOL(module_alloc_exec);
24892 +
24893 +void module_free_exec(struct module *mod, void *module_region)
24894 +{
24895 + vunmap(module_region);
24896 +}
24897 +EXPORT_SYMBOL(module_free_exec);
24898 +#else
24899 +void module_free_exec(struct module *mod, void *module_region)
24900 +{
24901 + module_free(mod, module_region);
24902 +}
24903 +EXPORT_SYMBOL(module_free_exec);
24904 +
24905 +void *module_alloc_exec(unsigned long size)
24906 +{
24907 + return __module_alloc(size, PAGE_KERNEL_RX);
24908 +}
24909 +EXPORT_SYMBOL(module_alloc_exec);
24910 +#endif
24911 +#endif
24912 +
24913 #ifdef CONFIG_X86_32
24914 int apply_relocate(Elf32_Shdr *sechdrs,
24915 const char *strtab,
24916 @@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
24917 unsigned int i;
24918 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
24919 Elf32_Sym *sym;
24920 - uint32_t *location;
24921 + uint32_t *plocation, location;
24922
24923 DEBUGP("Applying relocate section %u to %u\n",
24924 relsec, sechdrs[relsec].sh_info);
24925 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
24926 /* This is where to make the change */
24927 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
24928 - + rel[i].r_offset;
24929 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
24930 + location = (uint32_t)plocation;
24931 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
24932 + plocation = ktla_ktva((void *)plocation);
24933 /* This is the symbol it is referring to. Note that all
24934 undefined symbols have been resolved. */
24935 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
24936 @@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
24937 switch (ELF32_R_TYPE(rel[i].r_info)) {
24938 case R_386_32:
24939 /* We add the value into the location given */
24940 - *location += sym->st_value;
24941 + pax_open_kernel();
24942 + *plocation += sym->st_value;
24943 + pax_close_kernel();
24944 break;
24945 case R_386_PC32:
24946 /* Add the value, subtract its position */
24947 - *location += sym->st_value - (uint32_t)location;
24948 + pax_open_kernel();
24949 + *plocation += sym->st_value - location;
24950 + pax_close_kernel();
24951 break;
24952 default:
24953 pr_err("%s: Unknown relocation: %u\n",
24954 @@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
24955 case R_X86_64_NONE:
24956 break;
24957 case R_X86_64_64:
24958 + pax_open_kernel();
24959 *(u64 *)loc = val;
24960 + pax_close_kernel();
24961 break;
24962 case R_X86_64_32:
24963 + pax_open_kernel();
24964 *(u32 *)loc = val;
24965 + pax_close_kernel();
24966 if (val != *(u32 *)loc)
24967 goto overflow;
24968 break;
24969 case R_X86_64_32S:
24970 + pax_open_kernel();
24971 *(s32 *)loc = val;
24972 + pax_close_kernel();
24973 if ((s64)val != *(s32 *)loc)
24974 goto overflow;
24975 break;
24976 case R_X86_64_PC32:
24977 val -= (u64)loc;
24978 + pax_open_kernel();
24979 *(u32 *)loc = val;
24980 + pax_close_kernel();
24981 +
24982 #if 0
24983 if ((s64)val != *(s32 *)loc)
24984 goto overflow;
24985 diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
24986 index 88458fa..349f7a4 100644
24987 --- a/arch/x86/kernel/msr.c
24988 +++ b/arch/x86/kernel/msr.c
24989 @@ -233,7 +233,7 @@ static int msr_class_cpu_callback(struct notifier_block *nfb,
24990 return notifier_from_errno(err);
24991 }
24992
24993 -static struct notifier_block __refdata msr_class_cpu_notifier = {
24994 +static struct notifier_block msr_class_cpu_notifier = {
24995 .notifier_call = msr_class_cpu_callback,
24996 };
24997
24998 diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
24999 index 6fcb49c..5b3f4ff 100644
25000 --- a/arch/x86/kernel/nmi.c
25001 +++ b/arch/x86/kernel/nmi.c
25002 @@ -138,7 +138,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
25003 return handled;
25004 }
25005
25006 -int __register_nmi_handler(unsigned int type, struct nmiaction *action)
25007 +int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
25008 {
25009 struct nmi_desc *desc = nmi_to_desc(type);
25010 unsigned long flags;
25011 @@ -162,9 +162,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
25012 * event confuses some handlers (kdump uses this flag)
25013 */
25014 if (action->flags & NMI_FLAG_FIRST)
25015 - list_add_rcu(&action->list, &desc->head);
25016 + pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
25017 else
25018 - list_add_tail_rcu(&action->list, &desc->head);
25019 + pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
25020
25021 spin_unlock_irqrestore(&desc->lock, flags);
25022 return 0;
25023 @@ -187,7 +187,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
25024 if (!strcmp(n->name, name)) {
25025 WARN(in_nmi(),
25026 "Trying to free NMI (%s) from NMI context!\n", n->name);
25027 - list_del_rcu(&n->list);
25028 + pax_list_del_rcu((struct list_head *)&n->list);
25029 break;
25030 }
25031 }
25032 @@ -512,6 +512,17 @@ static inline void nmi_nesting_postprocess(void)
25033 dotraplinkage notrace __kprobes void
25034 do_nmi(struct pt_regs *regs, long error_code)
25035 {
25036 +
25037 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
25038 + if (!user_mode(regs)) {
25039 + unsigned long cs = regs->cs & 0xFFFF;
25040 + unsigned long ip = ktva_ktla(regs->ip);
25041 +
25042 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
25043 + regs->ip = ip;
25044 + }
25045 +#endif
25046 +
25047 nmi_nesting_preprocess(regs);
25048
25049 nmi_enter();
25050 diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
25051 index 6d9582e..f746287 100644
25052 --- a/arch/x86/kernel/nmi_selftest.c
25053 +++ b/arch/x86/kernel/nmi_selftest.c
25054 @@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
25055 {
25056 /* trap all the unknown NMIs we may generate */
25057 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
25058 - __initdata);
25059 + __initconst);
25060 }
25061
25062 static void __init cleanup_nmi_testsuite(void)
25063 @@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
25064 unsigned long timeout;
25065
25066 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
25067 - NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
25068 + NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
25069 nmi_fail = FAILURE;
25070 return;
25071 }
25072 diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
25073 index bbb6c73..24a58ef 100644
25074 --- a/arch/x86/kernel/paravirt-spinlocks.c
25075 +++ b/arch/x86/kernel/paravirt-spinlocks.c
25076 @@ -8,7 +8,7 @@
25077
25078 #include <asm/paravirt.h>
25079
25080 -struct pv_lock_ops pv_lock_ops = {
25081 +struct pv_lock_ops pv_lock_ops __read_only = {
25082 #ifdef CONFIG_SMP
25083 .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
25084 .unlock_kick = paravirt_nop,
25085 diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
25086 index 1b10af8..0b58cbc 100644
25087 --- a/arch/x86/kernel/paravirt.c
25088 +++ b/arch/x86/kernel/paravirt.c
25089 @@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
25090 {
25091 return x;
25092 }
25093 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25094 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
25095 +#endif
25096
25097 void __init default_banner(void)
25098 {
25099 @@ -142,15 +145,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
25100 if (opfunc == NULL)
25101 /* If there's no function, patch it with a ud2a (BUG) */
25102 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
25103 - else if (opfunc == _paravirt_nop)
25104 + else if (opfunc == (void *)_paravirt_nop)
25105 /* If the operation is a nop, then nop the callsite */
25106 ret = paravirt_patch_nop();
25107
25108 /* identity functions just return their single argument */
25109 - else if (opfunc == _paravirt_ident_32)
25110 + else if (opfunc == (void *)_paravirt_ident_32)
25111 ret = paravirt_patch_ident_32(insnbuf, len);
25112 - else if (opfunc == _paravirt_ident_64)
25113 + else if (opfunc == (void *)_paravirt_ident_64)
25114 ret = paravirt_patch_ident_64(insnbuf, len);
25115 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25116 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
25117 + ret = paravirt_patch_ident_64(insnbuf, len);
25118 +#endif
25119
25120 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
25121 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
25122 @@ -175,7 +182,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
25123 if (insn_len > len || start == NULL)
25124 insn_len = len;
25125 else
25126 - memcpy(insnbuf, start, insn_len);
25127 + memcpy(insnbuf, ktla_ktva(start), insn_len);
25128
25129 return insn_len;
25130 }
25131 @@ -299,7 +306,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
25132 return this_cpu_read(paravirt_lazy_mode);
25133 }
25134
25135 -struct pv_info pv_info = {
25136 +struct pv_info pv_info __read_only = {
25137 .name = "bare hardware",
25138 .paravirt_enabled = 0,
25139 .kernel_rpl = 0,
25140 @@ -310,16 +317,16 @@ struct pv_info pv_info = {
25141 #endif
25142 };
25143
25144 -struct pv_init_ops pv_init_ops = {
25145 +struct pv_init_ops pv_init_ops __read_only = {
25146 .patch = native_patch,
25147 };
25148
25149 -struct pv_time_ops pv_time_ops = {
25150 +struct pv_time_ops pv_time_ops __read_only = {
25151 .sched_clock = native_sched_clock,
25152 .steal_clock = native_steal_clock,
25153 };
25154
25155 -__visible struct pv_irq_ops pv_irq_ops = {
25156 +__visible struct pv_irq_ops pv_irq_ops __read_only = {
25157 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
25158 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
25159 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
25160 @@ -331,7 +338,7 @@ __visible struct pv_irq_ops pv_irq_ops = {
25161 #endif
25162 };
25163
25164 -__visible struct pv_cpu_ops pv_cpu_ops = {
25165 +__visible struct pv_cpu_ops pv_cpu_ops __read_only = {
25166 .cpuid = native_cpuid,
25167 .get_debugreg = native_get_debugreg,
25168 .set_debugreg = native_set_debugreg,
25169 @@ -389,21 +396,26 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
25170 .end_context_switch = paravirt_nop,
25171 };
25172
25173 -struct pv_apic_ops pv_apic_ops = {
25174 +struct pv_apic_ops pv_apic_ops __read_only= {
25175 #ifdef CONFIG_X86_LOCAL_APIC
25176 .startup_ipi_hook = paravirt_nop,
25177 #endif
25178 };
25179
25180 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
25181 +#ifdef CONFIG_X86_32
25182 +#ifdef CONFIG_X86_PAE
25183 +/* 64-bit pagetable entries */
25184 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
25185 +#else
25186 /* 32-bit pagetable entries */
25187 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
25188 +#endif
25189 #else
25190 /* 64-bit pagetable entries */
25191 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
25192 #endif
25193
25194 -struct pv_mmu_ops pv_mmu_ops = {
25195 +struct pv_mmu_ops pv_mmu_ops __read_only = {
25196
25197 .read_cr2 = native_read_cr2,
25198 .write_cr2 = native_write_cr2,
25199 @@ -453,6 +465,7 @@ struct pv_mmu_ops pv_mmu_ops = {
25200 .make_pud = PTE_IDENT,
25201
25202 .set_pgd = native_set_pgd,
25203 + .set_pgd_batched = native_set_pgd_batched,
25204 #endif
25205 #endif /* PAGETABLE_LEVELS >= 3 */
25206
25207 @@ -473,6 +486,12 @@ struct pv_mmu_ops pv_mmu_ops = {
25208 },
25209
25210 .set_fixmap = native_set_fixmap,
25211 +
25212 +#ifdef CONFIG_PAX_KERNEXEC
25213 + .pax_open_kernel = native_pax_open_kernel,
25214 + .pax_close_kernel = native_pax_close_kernel,
25215 +#endif
25216 +
25217 };
25218
25219 EXPORT_SYMBOL_GPL(pv_time_ops);
25220 diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
25221 index 299d493..2ccb0ee 100644
25222 --- a/arch/x86/kernel/pci-calgary_64.c
25223 +++ b/arch/x86/kernel/pci-calgary_64.c
25224 @@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
25225 tce_space = be64_to_cpu(readq(target));
25226 tce_space = tce_space & TAR_SW_BITS;
25227
25228 - tce_space = tce_space & (~specified_table_size);
25229 + tce_space = tce_space & (~(unsigned long)specified_table_size);
25230 info->tce_space = (u64 *)__va(tce_space);
25231 }
25232 }
25233 diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
25234 index 35ccf75..7a15747 100644
25235 --- a/arch/x86/kernel/pci-iommu_table.c
25236 +++ b/arch/x86/kernel/pci-iommu_table.c
25237 @@ -2,7 +2,7 @@
25238 #include <asm/iommu_table.h>
25239 #include <linux/string.h>
25240 #include <linux/kallsyms.h>
25241 -
25242 +#include <linux/sched.h>
25243
25244 #define DEBUG 1
25245
25246 diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
25247 index 6c483ba..d10ce2f 100644
25248 --- a/arch/x86/kernel/pci-swiotlb.c
25249 +++ b/arch/x86/kernel/pci-swiotlb.c
25250 @@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
25251 void *vaddr, dma_addr_t dma_addr,
25252 struct dma_attrs *attrs)
25253 {
25254 - swiotlb_free_coherent(dev, size, vaddr, dma_addr);
25255 + swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
25256 }
25257
25258 static struct dma_map_ops swiotlb_dma_ops = {
25259 diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
25260 index 3fb8d95..254dc51 100644
25261 --- a/arch/x86/kernel/process.c
25262 +++ b/arch/x86/kernel/process.c
25263 @@ -36,7 +36,8 @@
25264 * section. Since TSS's are completely CPU-local, we want them
25265 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
25266 */
25267 -__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
25268 +struct tss_struct init_tss[NR_CPUS] __visible ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
25269 +EXPORT_SYMBOL(init_tss);
25270
25271 #ifdef CONFIG_X86_64
25272 static DEFINE_PER_CPU(unsigned char, is_idle);
25273 @@ -92,7 +93,7 @@ void arch_task_cache_init(void)
25274 task_xstate_cachep =
25275 kmem_cache_create("task_xstate", xstate_size,
25276 __alignof__(union thread_xstate),
25277 - SLAB_PANIC | SLAB_NOTRACK, NULL);
25278 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
25279 }
25280
25281 /*
25282 @@ -105,7 +106,7 @@ void exit_thread(void)
25283 unsigned long *bp = t->io_bitmap_ptr;
25284
25285 if (bp) {
25286 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
25287 + struct tss_struct *tss = init_tss + get_cpu();
25288
25289 t->io_bitmap_ptr = NULL;
25290 clear_thread_flag(TIF_IO_BITMAP);
25291 @@ -125,6 +126,9 @@ void flush_thread(void)
25292 {
25293 struct task_struct *tsk = current;
25294
25295 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
25296 + loadsegment(gs, 0);
25297 +#endif
25298 flush_ptrace_hw_breakpoint(tsk);
25299 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
25300 drop_init_fpu(tsk);
25301 @@ -271,7 +275,7 @@ static void __exit_idle(void)
25302 void exit_idle(void)
25303 {
25304 /* idle loop has pid 0 */
25305 - if (current->pid)
25306 + if (task_pid_nr(current))
25307 return;
25308 __exit_idle();
25309 }
25310 @@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
25311 return ret;
25312 }
25313 #endif
25314 -void stop_this_cpu(void *dummy)
25315 +__noreturn void stop_this_cpu(void *dummy)
25316 {
25317 local_irq_disable();
25318 /*
25319 @@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
25320 }
25321 early_param("idle", idle_setup);
25322
25323 -unsigned long arch_align_stack(unsigned long sp)
25324 +#ifdef CONFIG_PAX_RANDKSTACK
25325 +void pax_randomize_kstack(struct pt_regs *regs)
25326 {
25327 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
25328 - sp -= get_random_int() % 8192;
25329 - return sp & ~0xf;
25330 -}
25331 + struct thread_struct *thread = &current->thread;
25332 + unsigned long time;
25333
25334 -unsigned long arch_randomize_brk(struct mm_struct *mm)
25335 -{
25336 - unsigned long range_end = mm->brk + 0x02000000;
25337 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
25338 -}
25339 + if (!randomize_va_space)
25340 + return;
25341 +
25342 + if (v8086_mode(regs))
25343 + return;
25344
25345 + rdtscl(time);
25346 +
25347 + /* P4 seems to return a 0 LSB, ignore it */
25348 +#ifdef CONFIG_MPENTIUM4
25349 + time &= 0x3EUL;
25350 + time <<= 2;
25351 +#elif defined(CONFIG_X86_64)
25352 + time &= 0xFUL;
25353 + time <<= 4;
25354 +#else
25355 + time &= 0x1FUL;
25356 + time <<= 3;
25357 +#endif
25358 +
25359 + thread->sp0 ^= time;
25360 + load_sp0(init_tss + smp_processor_id(), thread);
25361 +
25362 +#ifdef CONFIG_X86_64
25363 + this_cpu_write(kernel_stack, thread->sp0);
25364 +#endif
25365 +}
25366 +#endif
25367 diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
25368 index 884f98f..ec23e04 100644
25369 --- a/arch/x86/kernel/process_32.c
25370 +++ b/arch/x86/kernel/process_32.c
25371 @@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
25372 unsigned long thread_saved_pc(struct task_struct *tsk)
25373 {
25374 return ((unsigned long *)tsk->thread.sp)[3];
25375 +//XXX return tsk->thread.eip;
25376 }
25377
25378 void __show_regs(struct pt_regs *regs, int all)
25379 @@ -74,19 +75,18 @@ void __show_regs(struct pt_regs *regs, int all)
25380 unsigned long sp;
25381 unsigned short ss, gs;
25382
25383 - if (user_mode_vm(regs)) {
25384 + if (user_mode(regs)) {
25385 sp = regs->sp;
25386 ss = regs->ss & 0xffff;
25387 - gs = get_user_gs(regs);
25388 } else {
25389 sp = kernel_stack_pointer(regs);
25390 savesegment(ss, ss);
25391 - savesegment(gs, gs);
25392 }
25393 + gs = get_user_gs(regs);
25394
25395 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
25396 (u16)regs->cs, regs->ip, regs->flags,
25397 - smp_processor_id());
25398 + raw_smp_processor_id());
25399 print_symbol("EIP is at %s\n", regs->ip);
25400
25401 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
25402 @@ -133,20 +133,21 @@ void release_thread(struct task_struct *dead_task)
25403 int copy_thread(unsigned long clone_flags, unsigned long sp,
25404 unsigned long arg, struct task_struct *p)
25405 {
25406 - struct pt_regs *childregs = task_pt_regs(p);
25407 + struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
25408 struct task_struct *tsk;
25409 int err;
25410
25411 p->thread.sp = (unsigned long) childregs;
25412 p->thread.sp0 = (unsigned long) (childregs+1);
25413 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
25414
25415 if (unlikely(p->flags & PF_KTHREAD)) {
25416 /* kernel thread */
25417 memset(childregs, 0, sizeof(struct pt_regs));
25418 p->thread.ip = (unsigned long) ret_from_kernel_thread;
25419 - task_user_gs(p) = __KERNEL_STACK_CANARY;
25420 - childregs->ds = __USER_DS;
25421 - childregs->es = __USER_DS;
25422 + savesegment(gs, childregs->gs);
25423 + childregs->ds = __KERNEL_DS;
25424 + childregs->es = __KERNEL_DS;
25425 childregs->fs = __KERNEL_PERCPU;
25426 childregs->bx = sp; /* function */
25427 childregs->bp = arg;
25428 @@ -253,7 +254,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25429 struct thread_struct *prev = &prev_p->thread,
25430 *next = &next_p->thread;
25431 int cpu = smp_processor_id();
25432 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
25433 + struct tss_struct *tss = init_tss + cpu;
25434 fpu_switch_t fpu;
25435
25436 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
25437 @@ -277,6 +278,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25438 */
25439 lazy_save_gs(prev->gs);
25440
25441 +#ifdef CONFIG_PAX_MEMORY_UDEREF
25442 + __set_fs(task_thread_info(next_p)->addr_limit);
25443 +#endif
25444 +
25445 /*
25446 * Load the per-thread Thread-Local Storage descriptor.
25447 */
25448 @@ -307,6 +312,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25449 */
25450 arch_end_context_switch(next_p);
25451
25452 + this_cpu_write(current_task, next_p);
25453 + this_cpu_write(current_tinfo, &next_p->tinfo);
25454 +
25455 /*
25456 * Restore %gs if needed (which is common)
25457 */
25458 @@ -315,8 +323,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25459
25460 switch_fpu_finish(next_p, fpu);
25461
25462 - this_cpu_write(current_task, next_p);
25463 -
25464 return prev_p;
25465 }
25466
25467 @@ -346,4 +352,3 @@ unsigned long get_wchan(struct task_struct *p)
25468 } while (count++ < 16);
25469 return 0;
25470 }
25471 -
25472 diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
25473 index bb1dc51..08dda7f 100644
25474 --- a/arch/x86/kernel/process_64.c
25475 +++ b/arch/x86/kernel/process_64.c
25476 @@ -158,10 +158,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
25477 struct pt_regs *childregs;
25478 struct task_struct *me = current;
25479
25480 - p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
25481 + p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
25482 childregs = task_pt_regs(p);
25483 p->thread.sp = (unsigned long) childregs;
25484 p->thread.usersp = me->thread.usersp;
25485 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
25486 set_tsk_thread_flag(p, TIF_FORK);
25487 p->fpu_counter = 0;
25488 p->thread.io_bitmap_ptr = NULL;
25489 @@ -172,6 +173,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
25490 p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs;
25491 savesegment(es, p->thread.es);
25492 savesegment(ds, p->thread.ds);
25493 + savesegment(ss, p->thread.ss);
25494 + BUG_ON(p->thread.ss == __UDEREF_KERNEL_DS);
25495 memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
25496
25497 if (unlikely(p->flags & PF_KTHREAD)) {
25498 @@ -280,7 +283,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25499 struct thread_struct *prev = &prev_p->thread;
25500 struct thread_struct *next = &next_p->thread;
25501 int cpu = smp_processor_id();
25502 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
25503 + struct tss_struct *tss = init_tss + cpu;
25504 unsigned fsindex, gsindex;
25505 fpu_switch_t fpu;
25506
25507 @@ -303,6 +306,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25508 if (unlikely(next->ds | prev->ds))
25509 loadsegment(ds, next->ds);
25510
25511 + savesegment(ss, prev->ss);
25512 + if (unlikely(next->ss != prev->ss))
25513 + loadsegment(ss, next->ss);
25514
25515 /* We must save %fs and %gs before load_TLS() because
25516 * %fs and %gs may be cleared by load_TLS().
25517 @@ -362,10 +368,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
25518 prev->usersp = this_cpu_read(old_rsp);
25519 this_cpu_write(old_rsp, next->usersp);
25520 this_cpu_write(current_task, next_p);
25521 + this_cpu_write(current_tinfo, &next_p->tinfo);
25522
25523 - this_cpu_write(kernel_stack,
25524 - (unsigned long)task_stack_page(next_p) +
25525 - THREAD_SIZE - KERNEL_STACK_OFFSET);
25526 + this_cpu_write(kernel_stack, next->sp0);
25527
25528 /*
25529 * Now maybe reload the debug registers and handle I/O bitmaps
25530 @@ -434,12 +439,11 @@ unsigned long get_wchan(struct task_struct *p)
25531 if (!p || p == current || p->state == TASK_RUNNING)
25532 return 0;
25533 stack = (unsigned long)task_stack_page(p);
25534 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
25535 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
25536 return 0;
25537 fp = *(u64 *)(p->thread.sp);
25538 do {
25539 - if (fp < (unsigned long)stack ||
25540 - fp >= (unsigned long)stack+THREAD_SIZE)
25541 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
25542 return 0;
25543 ip = *(u64 *)(fp+8);
25544 if (!in_sched_functions(ip))
25545 diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
25546 index 7461f50..1334029 100644
25547 --- a/arch/x86/kernel/ptrace.c
25548 +++ b/arch/x86/kernel/ptrace.c
25549 @@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
25550 {
25551 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
25552 unsigned long sp = (unsigned long)&regs->sp;
25553 - struct thread_info *tinfo;
25554
25555 - if (context == (sp & ~(THREAD_SIZE - 1)))
25556 + if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
25557 return sp;
25558
25559 - tinfo = (struct thread_info *)context;
25560 - if (tinfo->previous_esp)
25561 - return tinfo->previous_esp;
25562 + sp = *(unsigned long *)context;
25563 + if (sp)
25564 + return sp;
25565
25566 return (unsigned long)regs;
25567 }
25568 @@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
25569 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
25570 {
25571 int i;
25572 - int dr7 = 0;
25573 + unsigned long dr7 = 0;
25574 struct arch_hw_breakpoint *info;
25575
25576 for (i = 0; i < HBP_NUM; i++) {
25577 @@ -822,7 +821,7 @@ long arch_ptrace(struct task_struct *child, long request,
25578 unsigned long addr, unsigned long data)
25579 {
25580 int ret;
25581 - unsigned long __user *datap = (unsigned long __user *)data;
25582 + unsigned long __user *datap = (__force unsigned long __user *)data;
25583
25584 switch (request) {
25585 /* read the word at location addr in the USER area. */
25586 @@ -907,14 +906,14 @@ long arch_ptrace(struct task_struct *child, long request,
25587 if ((int) addr < 0)
25588 return -EIO;
25589 ret = do_get_thread_area(child, addr,
25590 - (struct user_desc __user *)data);
25591 + (__force struct user_desc __user *) data);
25592 break;
25593
25594 case PTRACE_SET_THREAD_AREA:
25595 if ((int) addr < 0)
25596 return -EIO;
25597 ret = do_set_thread_area(child, addr,
25598 - (struct user_desc __user *)data, 0);
25599 + (__force struct user_desc __user *) data, 0);
25600 break;
25601 #endif
25602
25603 @@ -1292,7 +1291,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
25604
25605 #ifdef CONFIG_X86_64
25606
25607 -static struct user_regset x86_64_regsets[] __read_mostly = {
25608 +static user_regset_no_const x86_64_regsets[] __read_only = {
25609 [REGSET_GENERAL] = {
25610 .core_note_type = NT_PRSTATUS,
25611 .n = sizeof(struct user_regs_struct) / sizeof(long),
25612 @@ -1333,7 +1332,7 @@ static const struct user_regset_view user_x86_64_view = {
25613 #endif /* CONFIG_X86_64 */
25614
25615 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
25616 -static struct user_regset x86_32_regsets[] __read_mostly = {
25617 +static user_regset_no_const x86_32_regsets[] __read_only = {
25618 [REGSET_GENERAL] = {
25619 .core_note_type = NT_PRSTATUS,
25620 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
25621 @@ -1386,7 +1385,7 @@ static const struct user_regset_view user_x86_32_view = {
25622 */
25623 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
25624
25625 -void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
25626 +void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
25627 {
25628 #ifdef CONFIG_X86_64
25629 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
25630 @@ -1421,7 +1420,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
25631 memset(info, 0, sizeof(*info));
25632 info->si_signo = SIGTRAP;
25633 info->si_code = si_code;
25634 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
25635 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
25636 }
25637
25638 void user_single_step_siginfo(struct task_struct *tsk,
25639 @@ -1450,6 +1449,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
25640 # define IS_IA32 0
25641 #endif
25642
25643 +#ifdef CONFIG_GRKERNSEC_SETXID
25644 +extern void gr_delayed_cred_worker(void);
25645 +#endif
25646 +
25647 /*
25648 * We must return the syscall number to actually look up in the table.
25649 * This can be -1L to skip running any syscall at all.
25650 @@ -1460,6 +1463,11 @@ long syscall_trace_enter(struct pt_regs *regs)
25651
25652 user_exit();
25653
25654 +#ifdef CONFIG_GRKERNSEC_SETXID
25655 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
25656 + gr_delayed_cred_worker();
25657 +#endif
25658 +
25659 /*
25660 * If we stepped into a sysenter/syscall insn, it trapped in
25661 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
25662 @@ -1515,6 +1523,11 @@ void syscall_trace_leave(struct pt_regs *regs)
25663 */
25664 user_exit();
25665
25666 +#ifdef CONFIG_GRKERNSEC_SETXID
25667 + if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
25668 + gr_delayed_cred_worker();
25669 +#endif
25670 +
25671 audit_syscall_exit(regs);
25672
25673 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
25674 diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
25675 index a16bae3..1f65f25 100644
25676 --- a/arch/x86/kernel/pvclock.c
25677 +++ b/arch/x86/kernel/pvclock.c
25678 @@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
25679 return pv_tsc_khz;
25680 }
25681
25682 -static atomic64_t last_value = ATOMIC64_INIT(0);
25683 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
25684
25685 void pvclock_resume(void)
25686 {
25687 - atomic64_set(&last_value, 0);
25688 + atomic64_set_unchecked(&last_value, 0);
25689 }
25690
25691 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
25692 @@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
25693 * updating at the same time, and one of them could be slightly behind,
25694 * making the assumption that last_value always go forward fail to hold.
25695 */
25696 - last = atomic64_read(&last_value);
25697 + last = atomic64_read_unchecked(&last_value);
25698 do {
25699 if (ret < last)
25700 return last;
25701 - last = atomic64_cmpxchg(&last_value, last, ret);
25702 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
25703 } while (unlikely(last != ret));
25704
25705 return ret;
25706 diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
25707 index 618ce26..ec7e21c 100644
25708 --- a/arch/x86/kernel/reboot.c
25709 +++ b/arch/x86/kernel/reboot.c
25710 @@ -68,6 +68,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
25711
25712 void __noreturn machine_real_restart(unsigned int type)
25713 {
25714 +
25715 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
25716 + struct desc_struct *gdt;
25717 +#endif
25718 +
25719 local_irq_disable();
25720
25721 /*
25722 @@ -95,7 +100,29 @@ void __noreturn machine_real_restart(unsigned int type)
25723
25724 /* Jump to the identity-mapped low memory code */
25725 #ifdef CONFIG_X86_32
25726 - asm volatile("jmpl *%0" : :
25727 +
25728 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
25729 + gdt = get_cpu_gdt_table(smp_processor_id());
25730 + pax_open_kernel();
25731 +#ifdef CONFIG_PAX_MEMORY_UDEREF
25732 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
25733 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
25734 + loadsegment(ds, __KERNEL_DS);
25735 + loadsegment(es, __KERNEL_DS);
25736 + loadsegment(ss, __KERNEL_DS);
25737 +#endif
25738 +#ifdef CONFIG_PAX_KERNEXEC
25739 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
25740 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
25741 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
25742 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
25743 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
25744 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
25745 +#endif
25746 + pax_close_kernel();
25747 +#endif
25748 +
25749 + asm volatile("ljmpl *%0" : :
25750 "rm" (real_mode_header->machine_real_restart_asm),
25751 "a" (type));
25752 #else
25753 @@ -466,7 +493,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
25754 * try to force a triple fault and then cycle between hitting the keyboard
25755 * controller and doing that
25756 */
25757 -static void native_machine_emergency_restart(void)
25758 +static void __noreturn native_machine_emergency_restart(void)
25759 {
25760 int i;
25761 int attempt = 0;
25762 @@ -575,13 +602,13 @@ void native_machine_shutdown(void)
25763 #endif
25764 }
25765
25766 -static void __machine_emergency_restart(int emergency)
25767 +static void __noreturn __machine_emergency_restart(int emergency)
25768 {
25769 reboot_emergency = emergency;
25770 machine_ops.emergency_restart();
25771 }
25772
25773 -static void native_machine_restart(char *__unused)
25774 +static void __noreturn native_machine_restart(char *__unused)
25775 {
25776 pr_notice("machine restart\n");
25777
25778 @@ -590,7 +617,7 @@ static void native_machine_restart(char *__unused)
25779 __machine_emergency_restart(0);
25780 }
25781
25782 -static void native_machine_halt(void)
25783 +static void __noreturn native_machine_halt(void)
25784 {
25785 /* Stop other cpus and apics */
25786 machine_shutdown();
25787 @@ -600,7 +627,7 @@ static void native_machine_halt(void)
25788 stop_this_cpu(NULL);
25789 }
25790
25791 -static void native_machine_power_off(void)
25792 +static void __noreturn native_machine_power_off(void)
25793 {
25794 if (pm_power_off) {
25795 if (!reboot_force)
25796 @@ -609,9 +636,10 @@ static void native_machine_power_off(void)
25797 }
25798 /* A fallback in case there is no PM info available */
25799 tboot_shutdown(TB_SHUTDOWN_HALT);
25800 + unreachable();
25801 }
25802
25803 -struct machine_ops machine_ops = {
25804 +struct machine_ops machine_ops __read_only = {
25805 .power_off = native_machine_power_off,
25806 .shutdown = native_machine_shutdown,
25807 .emergency_restart = native_machine_emergency_restart,
25808 diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
25809 index c8e41e9..64049ef 100644
25810 --- a/arch/x86/kernel/reboot_fixups_32.c
25811 +++ b/arch/x86/kernel/reboot_fixups_32.c
25812 @@ -57,7 +57,7 @@ struct device_fixup {
25813 unsigned int vendor;
25814 unsigned int device;
25815 void (*reboot_fixup)(struct pci_dev *);
25816 -};
25817 +} __do_const;
25818
25819 /*
25820 * PCI ids solely used for fixups_table go here
25821 diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
25822 index 3fd2c69..16ef367 100644
25823 --- a/arch/x86/kernel/relocate_kernel_64.S
25824 +++ b/arch/x86/kernel/relocate_kernel_64.S
25825 @@ -11,6 +11,7 @@
25826 #include <asm/kexec.h>
25827 #include <asm/processor-flags.h>
25828 #include <asm/pgtable_types.h>
25829 +#include <asm/alternative-asm.h>
25830
25831 /*
25832 * Must be relocatable PIC code callable as a C function
25833 @@ -96,8 +97,7 @@ relocate_kernel:
25834
25835 /* jump to identity mapped page */
25836 addq $(identity_mapped - relocate_kernel), %r8
25837 - pushq %r8
25838 - ret
25839 + jmp *%r8
25840
25841 identity_mapped:
25842 /* set return address to 0 if not preserving context */
25843 @@ -167,6 +167,7 @@ identity_mapped:
25844 xorl %r14d, %r14d
25845 xorl %r15d, %r15d
25846
25847 + pax_force_retaddr 0, 1
25848 ret
25849
25850 1:
25851 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
25852 index f0de629..a4978a8f 100644
25853 --- a/arch/x86/kernel/setup.c
25854 +++ b/arch/x86/kernel/setup.c
25855 @@ -110,6 +110,7 @@
25856 #include <asm/mce.h>
25857 #include <asm/alternative.h>
25858 #include <asm/prom.h>
25859 +#include <asm/boot.h>
25860
25861 /*
25862 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
25863 @@ -205,12 +206,50 @@ EXPORT_SYMBOL(boot_cpu_data);
25864 #endif
25865
25866
25867 -#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
25868 -__visible unsigned long mmu_cr4_features;
25869 +#ifdef CONFIG_X86_64
25870 +__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE;
25871 +#elif defined(CONFIG_X86_PAE)
25872 +__visible unsigned long mmu_cr4_features __read_only = X86_CR4_PAE;
25873 #else
25874 -__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
25875 +__visible unsigned long mmu_cr4_features __read_only;
25876 #endif
25877
25878 +void set_in_cr4(unsigned long mask)
25879 +{
25880 + unsigned long cr4 = read_cr4();
25881 +
25882 + if ((cr4 & mask) == mask && cr4 == mmu_cr4_features)
25883 + return;
25884 +
25885 + pax_open_kernel();
25886 + mmu_cr4_features |= mask;
25887 + pax_close_kernel();
25888 +
25889 + if (trampoline_cr4_features)
25890 + *trampoline_cr4_features = mmu_cr4_features;
25891 + cr4 |= mask;
25892 + write_cr4(cr4);
25893 +}
25894 +EXPORT_SYMBOL(set_in_cr4);
25895 +
25896 +void clear_in_cr4(unsigned long mask)
25897 +{
25898 + unsigned long cr4 = read_cr4();
25899 +
25900 + if (!(cr4 & mask) && cr4 == mmu_cr4_features)
25901 + return;
25902 +
25903 + pax_open_kernel();
25904 + mmu_cr4_features &= ~mask;
25905 + pax_close_kernel();
25906 +
25907 + if (trampoline_cr4_features)
25908 + *trampoline_cr4_features = mmu_cr4_features;
25909 + cr4 &= ~mask;
25910 + write_cr4(cr4);
25911 +}
25912 +EXPORT_SYMBOL(clear_in_cr4);
25913 +
25914 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
25915 int bootloader_type, bootloader_version;
25916
25917 @@ -768,7 +807,7 @@ static void __init trim_bios_range(void)
25918 * area (640->1Mb) as ram even though it is not.
25919 * take them out.
25920 */
25921 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
25922 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
25923
25924 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
25925 }
25926 @@ -776,7 +815,7 @@ static void __init trim_bios_range(void)
25927 /* called before trim_bios_range() to spare extra sanitize */
25928 static void __init e820_add_kernel_range(void)
25929 {
25930 - u64 start = __pa_symbol(_text);
25931 + u64 start = __pa_symbol(ktla_ktva(_text));
25932 u64 size = __pa_symbol(_end) - start;
25933
25934 /*
25935 @@ -838,8 +877,12 @@ static void __init trim_low_memory_range(void)
25936
25937 void __init setup_arch(char **cmdline_p)
25938 {
25939 +#ifdef CONFIG_X86_32
25940 + memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
25941 +#else
25942 memblock_reserve(__pa_symbol(_text),
25943 (unsigned long)__bss_stop - (unsigned long)_text);
25944 +#endif
25945
25946 early_reserve_initrd();
25947
25948 @@ -931,14 +974,14 @@ void __init setup_arch(char **cmdline_p)
25949
25950 if (!boot_params.hdr.root_flags)
25951 root_mountflags &= ~MS_RDONLY;
25952 - init_mm.start_code = (unsigned long) _text;
25953 - init_mm.end_code = (unsigned long) _etext;
25954 + init_mm.start_code = ktla_ktva((unsigned long) _text);
25955 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
25956 init_mm.end_data = (unsigned long) _edata;
25957 init_mm.brk = _brk_end;
25958
25959 - code_resource.start = __pa_symbol(_text);
25960 - code_resource.end = __pa_symbol(_etext)-1;
25961 - data_resource.start = __pa_symbol(_etext);
25962 + code_resource.start = __pa_symbol(ktla_ktva(_text));
25963 + code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
25964 + data_resource.start = __pa_symbol(_sdata);
25965 data_resource.end = __pa_symbol(_edata)-1;
25966 bss_resource.start = __pa_symbol(__bss_start);
25967 bss_resource.end = __pa_symbol(__bss_stop)-1;
25968 diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
25969 index 5cdff03..80fa283 100644
25970 --- a/arch/x86/kernel/setup_percpu.c
25971 +++ b/arch/x86/kernel/setup_percpu.c
25972 @@ -21,19 +21,17 @@
25973 #include <asm/cpu.h>
25974 #include <asm/stackprotector.h>
25975
25976 -DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
25977 +#ifdef CONFIG_SMP
25978 +DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
25979 EXPORT_PER_CPU_SYMBOL(cpu_number);
25980 +#endif
25981
25982 -#ifdef CONFIG_X86_64
25983 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
25984 -#else
25985 -#define BOOT_PERCPU_OFFSET 0
25986 -#endif
25987
25988 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
25989 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
25990
25991 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
25992 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
25993 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
25994 };
25995 EXPORT_SYMBOL(__per_cpu_offset);
25996 @@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
25997 {
25998 #ifdef CONFIG_NEED_MULTIPLE_NODES
25999 pg_data_t *last = NULL;
26000 - unsigned int cpu;
26001 + int cpu;
26002
26003 for_each_possible_cpu(cpu) {
26004 int node = early_cpu_to_node(cpu);
26005 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
26006 {
26007 #ifdef CONFIG_X86_32
26008 struct desc_struct gdt;
26009 + unsigned long base = per_cpu_offset(cpu);
26010
26011 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
26012 - 0x2 | DESCTYPE_S, 0x8);
26013 - gdt.s = 1;
26014 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
26015 + 0x83 | DESCTYPE_S, 0xC);
26016 write_gdt_entry(get_cpu_gdt_table(cpu),
26017 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
26018 #endif
26019 @@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
26020 /* alrighty, percpu areas up and running */
26021 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
26022 for_each_possible_cpu(cpu) {
26023 +#ifdef CONFIG_CC_STACKPROTECTOR
26024 +#ifdef CONFIG_X86_32
26025 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
26026 +#endif
26027 +#endif
26028 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
26029 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
26030 per_cpu(cpu_number, cpu) = cpu;
26031 @@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
26032 */
26033 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
26034 #endif
26035 +#ifdef CONFIG_CC_STACKPROTECTOR
26036 +#ifdef CONFIG_X86_32
26037 + if (!cpu)
26038 + per_cpu(stack_canary.canary, cpu) = canary;
26039 +#endif
26040 +#endif
26041 /*
26042 * Up to this point, the boot CPU has been using .init.data
26043 * area. Reload any changed state for the boot CPU.
26044 diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
26045 index 9e5de68..16c53cb 100644
26046 --- a/arch/x86/kernel/signal.c
26047 +++ b/arch/x86/kernel/signal.c
26048 @@ -190,7 +190,7 @@ static unsigned long align_sigframe(unsigned long sp)
26049 * Align the stack pointer according to the i386 ABI,
26050 * i.e. so that on function entry ((sp + 4) & 15) == 0.
26051 */
26052 - sp = ((sp + 4) & -16ul) - 4;
26053 + sp = ((sp - 12) & -16ul) - 4;
26054 #else /* !CONFIG_X86_32 */
26055 sp = round_down(sp, 16) - 8;
26056 #endif
26057 @@ -298,9 +298,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
26058 }
26059
26060 if (current->mm->context.vdso)
26061 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
26062 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
26063 else
26064 - restorer = &frame->retcode;
26065 + restorer = (void __user *)&frame->retcode;
26066 if (ksig->ka.sa.sa_flags & SA_RESTORER)
26067 restorer = ksig->ka.sa.sa_restorer;
26068
26069 @@ -314,7 +314,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
26070 * reasons and because gdb uses it as a signature to notice
26071 * signal handler stack frames.
26072 */
26073 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
26074 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
26075
26076 if (err)
26077 return -EFAULT;
26078 @@ -361,7 +361,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
26079 save_altstack_ex(&frame->uc.uc_stack, regs->sp);
26080
26081 /* Set up to return from userspace. */
26082 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
26083 + if (current->mm->context.vdso)
26084 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
26085 + else
26086 + restorer = (void __user *)&frame->retcode;
26087 if (ksig->ka.sa.sa_flags & SA_RESTORER)
26088 restorer = ksig->ka.sa.sa_restorer;
26089 put_user_ex(restorer, &frame->pretcode);
26090 @@ -373,7 +376,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
26091 * reasons and because gdb uses it as a signature to notice
26092 * signal handler stack frames.
26093 */
26094 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
26095 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
26096 } put_user_catch(err);
26097
26098 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
26099 @@ -609,7 +612,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
26100 {
26101 int usig = signr_convert(ksig->sig);
26102 sigset_t *set = sigmask_to_save();
26103 - compat_sigset_t *cset = (compat_sigset_t *) set;
26104 + sigset_t sigcopy;
26105 + compat_sigset_t *cset;
26106 +
26107 + sigcopy = *set;
26108 +
26109 + cset = (compat_sigset_t *) &sigcopy;
26110
26111 /* Set up the stack frame */
26112 if (is_ia32_frame()) {
26113 @@ -620,7 +628,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
26114 } else if (is_x32_frame()) {
26115 return x32_setup_rt_frame(ksig, cset, regs);
26116 } else {
26117 - return __setup_rt_frame(ksig->sig, ksig, set, regs);
26118 + return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
26119 }
26120 }
26121
26122 diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
26123 index 7c3a5a6..f0a8961 100644
26124 --- a/arch/x86/kernel/smp.c
26125 +++ b/arch/x86/kernel/smp.c
26126 @@ -341,7 +341,7 @@ static int __init nonmi_ipi_setup(char *str)
26127
26128 __setup("nonmi_ipi", nonmi_ipi_setup);
26129
26130 -struct smp_ops smp_ops = {
26131 +struct smp_ops smp_ops __read_only = {
26132 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
26133 .smp_prepare_cpus = native_smp_prepare_cpus,
26134 .smp_cpus_done = native_smp_cpus_done,
26135 diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
26136 index 6cacab6..750636a 100644
26137 --- a/arch/x86/kernel/smpboot.c
26138 +++ b/arch/x86/kernel/smpboot.c
26139 @@ -251,14 +251,18 @@ static void notrace start_secondary(void *unused)
26140
26141 enable_start_cpu0 = 0;
26142
26143 -#ifdef CONFIG_X86_32
26144 - /* switch away from the initial page table */
26145 - load_cr3(swapper_pg_dir);
26146 - __flush_tlb_all();
26147 -#endif
26148 -
26149 /* otherwise gcc will move up smp_processor_id before the cpu_init */
26150 barrier();
26151 +
26152 + /* switch away from the initial page table */
26153 +#ifdef CONFIG_PAX_PER_CPU_PGD
26154 + load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
26155 + __flush_tlb_all();
26156 +#elif defined(CONFIG_X86_32)
26157 + load_cr3(swapper_pg_dir);
26158 + __flush_tlb_all();
26159 +#endif
26160 +
26161 /*
26162 * Check TSC synchronization with the BP:
26163 */
26164 @@ -749,6 +753,7 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
26165 idle->thread.sp = (unsigned long) (((struct pt_regs *)
26166 (THREAD_SIZE + task_stack_page(idle))) - 1);
26167 per_cpu(current_task, cpu) = idle;
26168 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
26169
26170 #ifdef CONFIG_X86_32
26171 /* Stack for startup_32 can be just as for start_secondary onwards */
26172 @@ -756,11 +761,13 @@ static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
26173 #else
26174 clear_tsk_thread_flag(idle, TIF_FORK);
26175 initial_gs = per_cpu_offset(cpu);
26176 - per_cpu(kernel_stack, cpu) =
26177 - (unsigned long)task_stack_page(idle) -
26178 - KERNEL_STACK_OFFSET + THREAD_SIZE;
26179 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
26180 #endif
26181 +
26182 + pax_open_kernel();
26183 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
26184 + pax_close_kernel();
26185 +
26186 initial_code = (unsigned long)start_secondary;
26187 stack_start = idle->thread.sp;
26188
26189 @@ -909,6 +916,15 @@ int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
26190 /* the FPU context is blank, nobody can own it */
26191 __cpu_disable_lazy_restore(cpu);
26192
26193 +#ifdef CONFIG_PAX_PER_CPU_PGD
26194 + clone_pgd_range(get_cpu_pgd(cpu, kernel) + KERNEL_PGD_BOUNDARY,
26195 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26196 + KERNEL_PGD_PTRS);
26197 + clone_pgd_range(get_cpu_pgd(cpu, user) + KERNEL_PGD_BOUNDARY,
26198 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
26199 + KERNEL_PGD_PTRS);
26200 +#endif
26201 +
26202 err = do_boot_cpu(apicid, cpu, tidle);
26203 if (err) {
26204 pr_debug("do_boot_cpu failed %d\n", err);
26205 diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
26206 index 9b4d51d..5d28b58 100644
26207 --- a/arch/x86/kernel/step.c
26208 +++ b/arch/x86/kernel/step.c
26209 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
26210 struct desc_struct *desc;
26211 unsigned long base;
26212
26213 - seg &= ~7UL;
26214 + seg >>= 3;
26215
26216 mutex_lock(&child->mm->context.lock);
26217 - if (unlikely((seg >> 3) >= child->mm->context.size))
26218 + if (unlikely(seg >= child->mm->context.size))
26219 addr = -1L; /* bogus selector, access would fault */
26220 else {
26221 desc = child->mm->context.ldt + seg;
26222 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
26223 addr += base;
26224 }
26225 mutex_unlock(&child->mm->context.lock);
26226 - }
26227 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
26228 + addr = ktla_ktva(addr);
26229
26230 return addr;
26231 }
26232 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
26233 unsigned char opcode[15];
26234 unsigned long addr = convert_ip_to_linear(child, regs);
26235
26236 + if (addr == -EINVAL)
26237 + return 0;
26238 +
26239 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
26240 for (i = 0; i < copied; i++) {
26241 switch (opcode[i]) {
26242 diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
26243 new file mode 100644
26244 index 0000000..5877189
26245 --- /dev/null
26246 +++ b/arch/x86/kernel/sys_i386_32.c
26247 @@ -0,0 +1,189 @@
26248 +/*
26249 + * This file contains various random system calls that
26250 + * have a non-standard calling sequence on the Linux/i386
26251 + * platform.
26252 + */
26253 +
26254 +#include <linux/errno.h>
26255 +#include <linux/sched.h>
26256 +#include <linux/mm.h>
26257 +#include <linux/fs.h>
26258 +#include <linux/smp.h>
26259 +#include <linux/sem.h>
26260 +#include <linux/msg.h>
26261 +#include <linux/shm.h>
26262 +#include <linux/stat.h>
26263 +#include <linux/syscalls.h>
26264 +#include <linux/mman.h>
26265 +#include <linux/file.h>
26266 +#include <linux/utsname.h>
26267 +#include <linux/ipc.h>
26268 +#include <linux/elf.h>
26269 +
26270 +#include <linux/uaccess.h>
26271 +#include <linux/unistd.h>
26272 +
26273 +#include <asm/syscalls.h>
26274 +
26275 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
26276 +{
26277 + unsigned long pax_task_size = TASK_SIZE;
26278 +
26279 +#ifdef CONFIG_PAX_SEGMEXEC
26280 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
26281 + pax_task_size = SEGMEXEC_TASK_SIZE;
26282 +#endif
26283 +
26284 + if (flags & MAP_FIXED)
26285 + if (len > pax_task_size || addr > pax_task_size - len)
26286 + return -EINVAL;
26287 +
26288 + return 0;
26289 +}
26290 +
26291 +/*
26292 + * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
26293 + */
26294 +static unsigned long get_align_mask(void)
26295 +{
26296 + if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
26297 + return 0;
26298 +
26299 + if (!(current->flags & PF_RANDOMIZE))
26300 + return 0;
26301 +
26302 + return va_align.mask;
26303 +}
26304 +
26305 +unsigned long
26306 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
26307 + unsigned long len, unsigned long pgoff, unsigned long flags)
26308 +{
26309 + struct mm_struct *mm = current->mm;
26310 + struct vm_area_struct *vma;
26311 + unsigned long pax_task_size = TASK_SIZE;
26312 + struct vm_unmapped_area_info info;
26313 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26314 +
26315 +#ifdef CONFIG_PAX_SEGMEXEC
26316 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
26317 + pax_task_size = SEGMEXEC_TASK_SIZE;
26318 +#endif
26319 +
26320 + pax_task_size -= PAGE_SIZE;
26321 +
26322 + if (len > pax_task_size)
26323 + return -ENOMEM;
26324 +
26325 + if (flags & MAP_FIXED)
26326 + return addr;
26327 +
26328 +#ifdef CONFIG_PAX_RANDMMAP
26329 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26330 +#endif
26331 +
26332 + if (addr) {
26333 + addr = PAGE_ALIGN(addr);
26334 + if (pax_task_size - len >= addr) {
26335 + vma = find_vma(mm, addr);
26336 + if (check_heap_stack_gap(vma, addr, len, offset))
26337 + return addr;
26338 + }
26339 + }
26340 +
26341 + info.flags = 0;
26342 + info.length = len;
26343 + info.align_mask = filp ? get_align_mask() : 0;
26344 + info.align_offset = pgoff << PAGE_SHIFT;
26345 + info.threadstack_offset = offset;
26346 +
26347 +#ifdef CONFIG_PAX_PAGEEXEC
26348 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
26349 + info.low_limit = 0x00110000UL;
26350 + info.high_limit = mm->start_code;
26351 +
26352 +#ifdef CONFIG_PAX_RANDMMAP
26353 + if (mm->pax_flags & MF_PAX_RANDMMAP)
26354 + info.low_limit += mm->delta_mmap & 0x03FFF000UL;
26355 +#endif
26356 +
26357 + if (info.low_limit < info.high_limit) {
26358 + addr = vm_unmapped_area(&info);
26359 + if (!IS_ERR_VALUE(addr))
26360 + return addr;
26361 + }
26362 + } else
26363 +#endif
26364 +
26365 + info.low_limit = mm->mmap_base;
26366 + info.high_limit = pax_task_size;
26367 +
26368 + return vm_unmapped_area(&info);
26369 +}
26370 +
26371 +unsigned long
26372 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26373 + const unsigned long len, const unsigned long pgoff,
26374 + const unsigned long flags)
26375 +{
26376 + struct vm_area_struct *vma;
26377 + struct mm_struct *mm = current->mm;
26378 + unsigned long addr = addr0, pax_task_size = TASK_SIZE;
26379 + struct vm_unmapped_area_info info;
26380 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26381 +
26382 +#ifdef CONFIG_PAX_SEGMEXEC
26383 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
26384 + pax_task_size = SEGMEXEC_TASK_SIZE;
26385 +#endif
26386 +
26387 + pax_task_size -= PAGE_SIZE;
26388 +
26389 + /* requested length too big for entire address space */
26390 + if (len > pax_task_size)
26391 + return -ENOMEM;
26392 +
26393 + if (flags & MAP_FIXED)
26394 + return addr;
26395 +
26396 +#ifdef CONFIG_PAX_PAGEEXEC
26397 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
26398 + goto bottomup;
26399 +#endif
26400 +
26401 +#ifdef CONFIG_PAX_RANDMMAP
26402 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26403 +#endif
26404 +
26405 + /* requesting a specific address */
26406 + if (addr) {
26407 + addr = PAGE_ALIGN(addr);
26408 + if (pax_task_size - len >= addr) {
26409 + vma = find_vma(mm, addr);
26410 + if (check_heap_stack_gap(vma, addr, len, offset))
26411 + return addr;
26412 + }
26413 + }
26414 +
26415 + info.flags = VM_UNMAPPED_AREA_TOPDOWN;
26416 + info.length = len;
26417 + info.low_limit = PAGE_SIZE;
26418 + info.high_limit = mm->mmap_base;
26419 + info.align_mask = filp ? get_align_mask() : 0;
26420 + info.align_offset = pgoff << PAGE_SHIFT;
26421 + info.threadstack_offset = offset;
26422 +
26423 + addr = vm_unmapped_area(&info);
26424 + if (!(addr & ~PAGE_MASK))
26425 + return addr;
26426 + VM_BUG_ON(addr != -ENOMEM);
26427 +
26428 +bottomup:
26429 + /*
26430 + * A failed mmap() very likely causes application failure,
26431 + * so fall back to the bottom-up function here. This scenario
26432 + * can happen with large stack limits and large mmap()
26433 + * allocations.
26434 + */
26435 + return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
26436 +}
26437 diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
26438 index 30277e2..5664a29 100644
26439 --- a/arch/x86/kernel/sys_x86_64.c
26440 +++ b/arch/x86/kernel/sys_x86_64.c
26441 @@ -81,8 +81,8 @@ out:
26442 return error;
26443 }
26444
26445 -static void find_start_end(unsigned long flags, unsigned long *begin,
26446 - unsigned long *end)
26447 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
26448 + unsigned long *begin, unsigned long *end)
26449 {
26450 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
26451 unsigned long new_begin;
26452 @@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
26453 *begin = new_begin;
26454 }
26455 } else {
26456 - *begin = current->mm->mmap_legacy_base;
26457 + *begin = mm->mmap_legacy_base;
26458 *end = TASK_SIZE;
26459 }
26460 }
26461 @@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
26462 struct vm_area_struct *vma;
26463 struct vm_unmapped_area_info info;
26464 unsigned long begin, end;
26465 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26466
26467 if (flags & MAP_FIXED)
26468 return addr;
26469
26470 - find_start_end(flags, &begin, &end);
26471 + find_start_end(mm, flags, &begin, &end);
26472
26473 if (len > end)
26474 return -ENOMEM;
26475
26476 +#ifdef CONFIG_PAX_RANDMMAP
26477 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26478 +#endif
26479 +
26480 if (addr) {
26481 addr = PAGE_ALIGN(addr);
26482 vma = find_vma(mm, addr);
26483 - if (end - len >= addr &&
26484 - (!vma || addr + len <= vma->vm_start))
26485 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
26486 return addr;
26487 }
26488
26489 @@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
26490 info.high_limit = end;
26491 info.align_mask = filp ? get_align_mask() : 0;
26492 info.align_offset = pgoff << PAGE_SHIFT;
26493 + info.threadstack_offset = offset;
26494 return vm_unmapped_area(&info);
26495 }
26496
26497 @@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26498 struct mm_struct *mm = current->mm;
26499 unsigned long addr = addr0;
26500 struct vm_unmapped_area_info info;
26501 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
26502
26503 /* requested length too big for entire address space */
26504 if (len > TASK_SIZE)
26505 @@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26506 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
26507 goto bottomup;
26508
26509 +#ifdef CONFIG_PAX_RANDMMAP
26510 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26511 +#endif
26512 +
26513 /* requesting a specific address */
26514 if (addr) {
26515 addr = PAGE_ALIGN(addr);
26516 vma = find_vma(mm, addr);
26517 - if (TASK_SIZE - len >= addr &&
26518 - (!vma || addr + len <= vma->vm_start))
26519 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
26520 return addr;
26521 }
26522
26523 @@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
26524 info.high_limit = mm->mmap_base;
26525 info.align_mask = filp ? get_align_mask() : 0;
26526 info.align_offset = pgoff << PAGE_SHIFT;
26527 + info.threadstack_offset = offset;
26528 addr = vm_unmapped_area(&info);
26529 if (!(addr & ~PAGE_MASK))
26530 return addr;
26531 diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
26532 index 91a4496..1730bff 100644
26533 --- a/arch/x86/kernel/tboot.c
26534 +++ b/arch/x86/kernel/tboot.c
26535 @@ -221,7 +221,7 @@ static int tboot_setup_sleep(void)
26536
26537 void tboot_shutdown(u32 shutdown_type)
26538 {
26539 - void (*shutdown)(void);
26540 + void (* __noreturn shutdown)(void);
26541
26542 if (!tboot_enabled())
26543 return;
26544 @@ -243,7 +243,7 @@ void tboot_shutdown(u32 shutdown_type)
26545
26546 switch_to_tboot_pt();
26547
26548 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
26549 + shutdown = (void *)tboot->shutdown_entry;
26550 shutdown();
26551
26552 /* should not reach here */
26553 @@ -310,7 +310,7 @@ static int tboot_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b)
26554 return -ENODEV;
26555 }
26556
26557 -static atomic_t ap_wfs_count;
26558 +static atomic_unchecked_t ap_wfs_count;
26559
26560 static int tboot_wait_for_aps(int num_aps)
26561 {
26562 @@ -334,9 +334,9 @@ static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
26563 {
26564 switch (action) {
26565 case CPU_DYING:
26566 - atomic_inc(&ap_wfs_count);
26567 + atomic_inc_unchecked(&ap_wfs_count);
26568 if (num_online_cpus() == 1)
26569 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
26570 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
26571 return NOTIFY_BAD;
26572 break;
26573 }
26574 @@ -422,7 +422,7 @@ static __init int tboot_late_init(void)
26575
26576 tboot_create_trampoline();
26577
26578 - atomic_set(&ap_wfs_count, 0);
26579 + atomic_set_unchecked(&ap_wfs_count, 0);
26580 register_hotcpu_notifier(&tboot_cpu_notifier);
26581
26582 #ifdef CONFIG_DEBUG_FS
26583 diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
26584 index 24d3c91..d06b473 100644
26585 --- a/arch/x86/kernel/time.c
26586 +++ b/arch/x86/kernel/time.c
26587 @@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
26588 {
26589 unsigned long pc = instruction_pointer(regs);
26590
26591 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
26592 + if (!user_mode(regs) && in_lock_functions(pc)) {
26593 #ifdef CONFIG_FRAME_POINTER
26594 - return *(unsigned long *)(regs->bp + sizeof(long));
26595 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
26596 #else
26597 unsigned long *sp =
26598 (unsigned long *)kernel_stack_pointer(regs);
26599 @@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
26600 * or above a saved flags. Eflags has bits 22-31 zero,
26601 * kernel addresses don't.
26602 */
26603 +
26604 +#ifdef CONFIG_PAX_KERNEXEC
26605 + return ktla_ktva(sp[0]);
26606 +#else
26607 if (sp[0] >> 22)
26608 return sp[0];
26609 if (sp[1] >> 22)
26610 return sp[1];
26611 #endif
26612 +
26613 +#endif
26614 }
26615 return pc;
26616 }
26617 diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
26618 index f7fec09..9991981 100644
26619 --- a/arch/x86/kernel/tls.c
26620 +++ b/arch/x86/kernel/tls.c
26621 @@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
26622 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
26623 return -EINVAL;
26624
26625 +#ifdef CONFIG_PAX_SEGMEXEC
26626 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
26627 + return -EINVAL;
26628 +#endif
26629 +
26630 set_tls_desc(p, idx, &info, 1);
26631
26632 return 0;
26633 @@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
26634
26635 if (kbuf)
26636 info = kbuf;
26637 - else if (__copy_from_user(infobuf, ubuf, count))
26638 + else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
26639 return -EFAULT;
26640 else
26641 info = infobuf;
26642 diff --git a/arch/x86/kernel/tracepoint.c b/arch/x86/kernel/tracepoint.c
26643 index 1c113db..287b42e 100644
26644 --- a/arch/x86/kernel/tracepoint.c
26645 +++ b/arch/x86/kernel/tracepoint.c
26646 @@ -9,11 +9,11 @@
26647 #include <linux/atomic.h>
26648
26649 atomic_t trace_idt_ctr = ATOMIC_INIT(0);
26650 -struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
26651 +const struct desc_ptr trace_idt_descr = { NR_VECTORS * 16 - 1,
26652 (unsigned long) trace_idt_table };
26653
26654 /* No need to be aligned, but done to keep all IDTs defined the same way. */
26655 -gate_desc trace_idt_table[NR_VECTORS] __page_aligned_bss;
26656 +gate_desc trace_idt_table[NR_VECTORS] __page_aligned_rodata;
26657
26658 static int trace_irq_vector_refcount;
26659 static DEFINE_MUTEX(irq_vector_mutex);
26660 diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
26661 index 8c8093b..c93f581 100644
26662 --- a/arch/x86/kernel/traps.c
26663 +++ b/arch/x86/kernel/traps.c
26664 @@ -66,7 +66,7 @@
26665 #include <asm/proto.h>
26666
26667 /* No need to be aligned, but done to keep all IDTs defined the same way. */
26668 -gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
26669 +gate_desc debug_idt_table[NR_VECTORS] __page_aligned_rodata;
26670 #else
26671 #include <asm/processor-flags.h>
26672 #include <asm/setup.h>
26673 @@ -75,7 +75,7 @@ asmlinkage int system_call(void);
26674 #endif
26675
26676 /* Must be page-aligned because the real IDT is used in a fixmap. */
26677 -gate_desc idt_table[NR_VECTORS] __page_aligned_bss;
26678 +gate_desc idt_table[NR_VECTORS] __page_aligned_rodata;
26679
26680 DECLARE_BITMAP(used_vectors, NR_VECTORS);
26681 EXPORT_SYMBOL_GPL(used_vectors);
26682 @@ -107,11 +107,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
26683 }
26684
26685 static int __kprobes
26686 -do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
26687 +do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
26688 struct pt_regs *regs, long error_code)
26689 {
26690 #ifdef CONFIG_X86_32
26691 - if (regs->flags & X86_VM_MASK) {
26692 + if (v8086_mode(regs)) {
26693 /*
26694 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
26695 * On nmi (interrupt 2), do_trap should not be called.
26696 @@ -124,12 +124,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
26697 return -1;
26698 }
26699 #endif
26700 - if (!user_mode(regs)) {
26701 + if (!user_mode_novm(regs)) {
26702 if (!fixup_exception(regs)) {
26703 tsk->thread.error_code = error_code;
26704 tsk->thread.trap_nr = trapnr;
26705 +
26706 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26707 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
26708 + str = "PAX: suspicious stack segment fault";
26709 +#endif
26710 +
26711 die(str, regs, error_code);
26712 }
26713 +
26714 +#ifdef CONFIG_PAX_REFCOUNT
26715 + if (trapnr == 4)
26716 + pax_report_refcount_overflow(regs);
26717 +#endif
26718 +
26719 return 0;
26720 }
26721
26722 @@ -137,7 +149,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
26723 }
26724
26725 static void __kprobes
26726 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
26727 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
26728 long error_code, siginfo_t *info)
26729 {
26730 struct task_struct *tsk = current;
26731 @@ -161,7 +173,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
26732 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
26733 printk_ratelimit()) {
26734 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
26735 - tsk->comm, tsk->pid, str,
26736 + tsk->comm, task_pid_nr(tsk), str,
26737 regs->ip, regs->sp, error_code);
26738 print_vma_addr(" in ", regs->ip);
26739 pr_cont("\n");
26740 @@ -277,7 +289,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
26741 conditional_sti(regs);
26742
26743 #ifdef CONFIG_X86_32
26744 - if (regs->flags & X86_VM_MASK) {
26745 + if (v8086_mode(regs)) {
26746 local_irq_enable();
26747 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
26748 goto exit;
26749 @@ -285,18 +297,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
26750 #endif
26751
26752 tsk = current;
26753 - if (!user_mode(regs)) {
26754 + if (!user_mode_novm(regs)) {
26755 if (fixup_exception(regs))
26756 goto exit;
26757
26758 tsk->thread.error_code = error_code;
26759 tsk->thread.trap_nr = X86_TRAP_GP;
26760 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
26761 - X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
26762 + X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
26763 +
26764 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26765 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
26766 + die("PAX: suspicious general protection fault", regs, error_code);
26767 + else
26768 +#endif
26769 +
26770 die("general protection fault", regs, error_code);
26771 + }
26772 goto exit;
26773 }
26774
26775 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
26776 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
26777 + struct mm_struct *mm = tsk->mm;
26778 + unsigned long limit;
26779 +
26780 + down_write(&mm->mmap_sem);
26781 + limit = mm->context.user_cs_limit;
26782 + if (limit < TASK_SIZE) {
26783 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
26784 + up_write(&mm->mmap_sem);
26785 + return;
26786 + }
26787 + up_write(&mm->mmap_sem);
26788 + }
26789 +#endif
26790 +
26791 tsk->thread.error_code = error_code;
26792 tsk->thread.trap_nr = X86_TRAP_GP;
26793
26794 @@ -457,7 +493,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
26795 /* It's safe to allow irq's after DR6 has been saved */
26796 preempt_conditional_sti(regs);
26797
26798 - if (regs->flags & X86_VM_MASK) {
26799 + if (v8086_mode(regs)) {
26800 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
26801 X86_TRAP_DB);
26802 preempt_conditional_cli(regs);
26803 @@ -472,7 +508,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
26804 * We already checked v86 mode above, so we can check for kernel mode
26805 * by just checking the CPL of CS.
26806 */
26807 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
26808 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
26809 tsk->thread.debugreg6 &= ~DR_STEP;
26810 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
26811 regs->flags &= ~X86_EFLAGS_TF;
26812 @@ -504,7 +540,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
26813 return;
26814 conditional_sti(regs);
26815
26816 - if (!user_mode_vm(regs))
26817 + if (!user_mode(regs))
26818 {
26819 if (!fixup_exception(regs)) {
26820 task->thread.error_code = error_code;
26821 diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
26822 index 2ed8459..7cf329f 100644
26823 --- a/arch/x86/kernel/uprobes.c
26824 +++ b/arch/x86/kernel/uprobes.c
26825 @@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
26826 int ret = NOTIFY_DONE;
26827
26828 /* We are only interested in userspace traps */
26829 - if (regs && !user_mode_vm(regs))
26830 + if (regs && !user_mode(regs))
26831 return NOTIFY_DONE;
26832
26833 switch (val) {
26834 @@ -719,7 +719,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
26835
26836 if (ncopied != rasize) {
26837 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
26838 - "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
26839 + "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
26840
26841 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
26842 }
26843 diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
26844 index b9242ba..50c5edd 100644
26845 --- a/arch/x86/kernel/verify_cpu.S
26846 +++ b/arch/x86/kernel/verify_cpu.S
26847 @@ -20,6 +20,7 @@
26848 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
26849 * arch/x86/kernel/trampoline_64.S: secondary processor verification
26850 * arch/x86/kernel/head_32.S: processor startup
26851 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
26852 *
26853 * verify_cpu, returns the status of longmode and SSE in register %eax.
26854 * 0: Success 1: Failure
26855 diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
26856 index e8edcf5..27f9344 100644
26857 --- a/arch/x86/kernel/vm86_32.c
26858 +++ b/arch/x86/kernel/vm86_32.c
26859 @@ -44,6 +44,7 @@
26860 #include <linux/ptrace.h>
26861 #include <linux/audit.h>
26862 #include <linux/stddef.h>
26863 +#include <linux/grsecurity.h>
26864
26865 #include <asm/uaccess.h>
26866 #include <asm/io.h>
26867 @@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
26868 do_exit(SIGSEGV);
26869 }
26870
26871 - tss = &per_cpu(init_tss, get_cpu());
26872 + tss = init_tss + get_cpu();
26873 current->thread.sp0 = current->thread.saved_sp0;
26874 current->thread.sysenter_cs = __KERNEL_CS;
26875 load_sp0(tss, &current->thread);
26876 @@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
26877
26878 if (tsk->thread.saved_sp0)
26879 return -EPERM;
26880 +
26881 +#ifdef CONFIG_GRKERNSEC_VM86
26882 + if (!capable(CAP_SYS_RAWIO)) {
26883 + gr_handle_vm86();
26884 + return -EPERM;
26885 + }
26886 +#endif
26887 +
26888 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
26889 offsetof(struct kernel_vm86_struct, vm86plus) -
26890 sizeof(info.regs));
26891 @@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
26892 int tmp;
26893 struct vm86plus_struct __user *v86;
26894
26895 +#ifdef CONFIG_GRKERNSEC_VM86
26896 + if (!capable(CAP_SYS_RAWIO)) {
26897 + gr_handle_vm86();
26898 + return -EPERM;
26899 + }
26900 +#endif
26901 +
26902 tsk = current;
26903 switch (cmd) {
26904 case VM86_REQUEST_IRQ:
26905 @@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
26906 tsk->thread.saved_fs = info->regs32->fs;
26907 tsk->thread.saved_gs = get_user_gs(info->regs32);
26908
26909 - tss = &per_cpu(init_tss, get_cpu());
26910 + tss = init_tss + get_cpu();
26911 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
26912 if (cpu_has_sep)
26913 tsk->thread.sysenter_cs = 0;
26914 @@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
26915 goto cannot_handle;
26916 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
26917 goto cannot_handle;
26918 - intr_ptr = (unsigned long __user *) (i << 2);
26919 + intr_ptr = (__force unsigned long __user *) (i << 2);
26920 if (get_user(segoffs, intr_ptr))
26921 goto cannot_handle;
26922 if ((segoffs >> 16) == BIOSSEG)
26923 diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
26924 index 10c4f30..65408b9 100644
26925 --- a/arch/x86/kernel/vmlinux.lds.S
26926 +++ b/arch/x86/kernel/vmlinux.lds.S
26927 @@ -26,6 +26,13 @@
26928 #include <asm/page_types.h>
26929 #include <asm/cache.h>
26930 #include <asm/boot.h>
26931 +#include <asm/segment.h>
26932 +
26933 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26934 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
26935 +#else
26936 +#define __KERNEL_TEXT_OFFSET 0
26937 +#endif
26938
26939 #undef i386 /* in case the preprocessor is a 32bit one */
26940
26941 @@ -69,30 +76,43 @@ jiffies_64 = jiffies;
26942
26943 PHDRS {
26944 text PT_LOAD FLAGS(5); /* R_E */
26945 +#ifdef CONFIG_X86_32
26946 + module PT_LOAD FLAGS(5); /* R_E */
26947 +#endif
26948 +#ifdef CONFIG_XEN
26949 + rodata PT_LOAD FLAGS(5); /* R_E */
26950 +#else
26951 + rodata PT_LOAD FLAGS(4); /* R__ */
26952 +#endif
26953 data PT_LOAD FLAGS(6); /* RW_ */
26954 -#ifdef CONFIG_X86_64
26955 + init.begin PT_LOAD FLAGS(6); /* RW_ */
26956 #ifdef CONFIG_SMP
26957 percpu PT_LOAD FLAGS(6); /* RW_ */
26958 #endif
26959 + text.init PT_LOAD FLAGS(5); /* R_E */
26960 + text.exit PT_LOAD FLAGS(5); /* R_E */
26961 init PT_LOAD FLAGS(7); /* RWE */
26962 -#endif
26963 note PT_NOTE FLAGS(0); /* ___ */
26964 }
26965
26966 SECTIONS
26967 {
26968 #ifdef CONFIG_X86_32
26969 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
26970 - phys_startup_32 = startup_32 - LOAD_OFFSET;
26971 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
26972 #else
26973 - . = __START_KERNEL;
26974 - phys_startup_64 = startup_64 - LOAD_OFFSET;
26975 + . = __START_KERNEL;
26976 #endif
26977
26978 /* Text and read-only data */
26979 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
26980 - _text = .;
26981 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
26982 /* bootstrapping code */
26983 +#ifdef CONFIG_X86_32
26984 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
26985 +#else
26986 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
26987 +#endif
26988 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
26989 + _text = .;
26990 HEAD_TEXT
26991 . = ALIGN(8);
26992 _stext = .;
26993 @@ -104,13 +124,47 @@ SECTIONS
26994 IRQENTRY_TEXT
26995 *(.fixup)
26996 *(.gnu.warning)
26997 - /* End of text section */
26998 - _etext = .;
26999 } :text = 0x9090
27000
27001 - NOTES :text :note
27002 + . += __KERNEL_TEXT_OFFSET;
27003
27004 - EXCEPTION_TABLE(16) :text = 0x9090
27005 +#ifdef CONFIG_X86_32
27006 + . = ALIGN(PAGE_SIZE);
27007 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
27008 +
27009 +#ifdef CONFIG_PAX_KERNEXEC
27010 + MODULES_EXEC_VADDR = .;
27011 + BYTE(0)
27012 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
27013 + . = ALIGN(HPAGE_SIZE) - 1;
27014 + MODULES_EXEC_END = .;
27015 +#endif
27016 +
27017 + } :module
27018 +#endif
27019 +
27020 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
27021 + /* End of text section */
27022 + BYTE(0)
27023 + _etext = . - __KERNEL_TEXT_OFFSET;
27024 + }
27025 +
27026 +#ifdef CONFIG_X86_32
27027 + . = ALIGN(PAGE_SIZE);
27028 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
27029 + . = ALIGN(PAGE_SIZE);
27030 + *(.empty_zero_page)
27031 + *(.initial_pg_fixmap)
27032 + *(.initial_pg_pmd)
27033 + *(.initial_page_table)
27034 + *(.swapper_pg_dir)
27035 + } :rodata
27036 +#endif
27037 +
27038 + . = ALIGN(PAGE_SIZE);
27039 + NOTES :rodata :note
27040 +
27041 + EXCEPTION_TABLE(16) :rodata
27042
27043 #if defined(CONFIG_DEBUG_RODATA)
27044 /* .text should occupy whole number of pages */
27045 @@ -122,16 +176,20 @@ SECTIONS
27046
27047 /* Data */
27048 .data : AT(ADDR(.data) - LOAD_OFFSET) {
27049 +
27050 +#ifdef CONFIG_PAX_KERNEXEC
27051 + . = ALIGN(HPAGE_SIZE);
27052 +#else
27053 + . = ALIGN(PAGE_SIZE);
27054 +#endif
27055 +
27056 /* Start of data section */
27057 _sdata = .;
27058
27059 /* init_task */
27060 INIT_TASK_DATA(THREAD_SIZE)
27061
27062 -#ifdef CONFIG_X86_32
27063 - /* 32 bit has nosave before _edata */
27064 NOSAVE_DATA
27065 -#endif
27066
27067 PAGE_ALIGNED_DATA(PAGE_SIZE)
27068
27069 @@ -172,12 +230,19 @@ SECTIONS
27070 #endif /* CONFIG_X86_64 */
27071
27072 /* Init code and data - will be freed after init */
27073 - . = ALIGN(PAGE_SIZE);
27074 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
27075 + BYTE(0)
27076 +
27077 +#ifdef CONFIG_PAX_KERNEXEC
27078 + . = ALIGN(HPAGE_SIZE);
27079 +#else
27080 + . = ALIGN(PAGE_SIZE);
27081 +#endif
27082 +
27083 __init_begin = .; /* paired with __init_end */
27084 - }
27085 + } :init.begin
27086
27087 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
27088 +#ifdef CONFIG_SMP
27089 /*
27090 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
27091 * output PHDR, so the next output section - .init.text - should
27092 @@ -186,12 +251,27 @@ SECTIONS
27093 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
27094 #endif
27095
27096 - INIT_TEXT_SECTION(PAGE_SIZE)
27097 -#ifdef CONFIG_X86_64
27098 - :init
27099 -#endif
27100 + . = ALIGN(PAGE_SIZE);
27101 + init_begin = .;
27102 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
27103 + VMLINUX_SYMBOL(_sinittext) = .;
27104 + INIT_TEXT
27105 + VMLINUX_SYMBOL(_einittext) = .;
27106 + . = ALIGN(PAGE_SIZE);
27107 + } :text.init
27108
27109 - INIT_DATA_SECTION(16)
27110 + /*
27111 + * .exit.text is discard at runtime, not link time, to deal with
27112 + * references from .altinstructions and .eh_frame
27113 + */
27114 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
27115 + EXIT_TEXT
27116 + . = ALIGN(16);
27117 + } :text.exit
27118 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
27119 +
27120 + . = ALIGN(PAGE_SIZE);
27121 + INIT_DATA_SECTION(16) :init
27122
27123 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
27124 __x86_cpu_dev_start = .;
27125 @@ -253,19 +333,12 @@ SECTIONS
27126 }
27127
27128 . = ALIGN(8);
27129 - /*
27130 - * .exit.text is discard at runtime, not link time, to deal with
27131 - * references from .altinstructions and .eh_frame
27132 - */
27133 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
27134 - EXIT_TEXT
27135 - }
27136
27137 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
27138 EXIT_DATA
27139 }
27140
27141 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
27142 +#ifndef CONFIG_SMP
27143 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
27144 #endif
27145
27146 @@ -284,16 +357,10 @@ SECTIONS
27147 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
27148 __smp_locks = .;
27149 *(.smp_locks)
27150 - . = ALIGN(PAGE_SIZE);
27151 __smp_locks_end = .;
27152 + . = ALIGN(PAGE_SIZE);
27153 }
27154
27155 -#ifdef CONFIG_X86_64
27156 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
27157 - NOSAVE_DATA
27158 - }
27159 -#endif
27160 -
27161 /* BSS */
27162 . = ALIGN(PAGE_SIZE);
27163 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
27164 @@ -309,6 +376,7 @@ SECTIONS
27165 __brk_base = .;
27166 . += 64 * 1024; /* 64k alignment slop space */
27167 *(.brk_reservation) /* areas brk users have reserved */
27168 + . = ALIGN(HPAGE_SIZE);
27169 __brk_limit = .;
27170 }
27171
27172 @@ -335,13 +403,12 @@ SECTIONS
27173 * for the boot processor.
27174 */
27175 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
27176 -INIT_PER_CPU(gdt_page);
27177 INIT_PER_CPU(irq_stack_union);
27178
27179 /*
27180 * Build-time check on the image size:
27181 */
27182 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
27183 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
27184 "kernel image bigger than KERNEL_IMAGE_SIZE");
27185
27186 #ifdef CONFIG_SMP
27187 diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
27188 index 1f96f93..d5c8f7a 100644
27189 --- a/arch/x86/kernel/vsyscall_64.c
27190 +++ b/arch/x86/kernel/vsyscall_64.c
27191 @@ -56,15 +56,13 @@
27192 DEFINE_VVAR(int, vgetcpu_mode);
27193 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
27194
27195 -static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
27196 +static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
27197
27198 static int __init vsyscall_setup(char *str)
27199 {
27200 if (str) {
27201 if (!strcmp("emulate", str))
27202 vsyscall_mode = EMULATE;
27203 - else if (!strcmp("native", str))
27204 - vsyscall_mode = NATIVE;
27205 else if (!strcmp("none", str))
27206 vsyscall_mode = NONE;
27207 else
27208 @@ -323,8 +321,7 @@ do_ret:
27209 return true;
27210
27211 sigsegv:
27212 - force_sig(SIGSEGV, current);
27213 - return true;
27214 + do_group_exit(SIGKILL);
27215 }
27216
27217 /*
27218 @@ -377,10 +374,7 @@ void __init map_vsyscall(void)
27219 extern char __vvar_page;
27220 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
27221
27222 - __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
27223 - vsyscall_mode == NATIVE
27224 - ? PAGE_KERNEL_VSYSCALL
27225 - : PAGE_KERNEL_VVAR);
27226 + __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
27227 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
27228 (unsigned long)VSYSCALL_START);
27229
27230 diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
27231 index b014d94..e775258 100644
27232 --- a/arch/x86/kernel/x8664_ksyms_64.c
27233 +++ b/arch/x86/kernel/x8664_ksyms_64.c
27234 @@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
27235 EXPORT_SYMBOL(copy_user_generic_unrolled);
27236 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
27237 EXPORT_SYMBOL(__copy_user_nocache);
27238 -EXPORT_SYMBOL(_copy_from_user);
27239 -EXPORT_SYMBOL(_copy_to_user);
27240
27241 EXPORT_SYMBOL(copy_page);
27242 EXPORT_SYMBOL(clear_page);
27243 @@ -66,3 +64,7 @@ EXPORT_SYMBOL(empty_zero_page);
27244 #ifndef CONFIG_PARAVIRT
27245 EXPORT_SYMBOL(native_load_gs_index);
27246 #endif
27247 +
27248 +#ifdef CONFIG_PAX_PER_CPU_PGD
27249 +EXPORT_SYMBOL(cpu_pgd);
27250 +#endif
27251 diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
27252 index 8ce0072..431a0e7 100644
27253 --- a/arch/x86/kernel/x86_init.c
27254 +++ b/arch/x86/kernel/x86_init.c
27255 @@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit = {
27256 static void default_nmi_init(void) { };
27257 static int default_i8042_detect(void) { return 1; };
27258
27259 -struct x86_platform_ops x86_platform = {
27260 +struct x86_platform_ops x86_platform __read_only = {
27261 .calibrate_tsc = native_calibrate_tsc,
27262 .get_wallclock = mach_get_cmos_time,
27263 .set_wallclock = mach_set_rtc_mmss,
27264 @@ -109,7 +109,7 @@ struct x86_platform_ops x86_platform = {
27265 EXPORT_SYMBOL_GPL(x86_platform);
27266
27267 #if defined(CONFIG_PCI_MSI)
27268 -struct x86_msi_ops x86_msi = {
27269 +struct x86_msi_ops x86_msi __read_only = {
27270 .setup_msi_irqs = native_setup_msi_irqs,
27271 .compose_msi_msg = native_compose_msi_msg,
27272 .teardown_msi_irq = native_teardown_msi_irq,
27273 @@ -140,7 +140,7 @@ void arch_restore_msi_irqs(struct pci_dev *dev, int irq)
27274 }
27275 #endif
27276
27277 -struct x86_io_apic_ops x86_io_apic_ops = {
27278 +struct x86_io_apic_ops x86_io_apic_ops __read_only = {
27279 .init = native_io_apic_init_mappings,
27280 .read = native_io_apic_read,
27281 .write = native_io_apic_write,
27282 diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
27283 index 422fd82..c3687ca 100644
27284 --- a/arch/x86/kernel/xsave.c
27285 +++ b/arch/x86/kernel/xsave.c
27286 @@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
27287 {
27288 int err;
27289
27290 + buf = (struct xsave_struct __user *)____m(buf);
27291 if (use_xsave())
27292 err = xsave_user(buf);
27293 else if (use_fxsr())
27294 @@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
27295 */
27296 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
27297 {
27298 + buf = (void __user *)____m(buf);
27299 if (use_xsave()) {
27300 if ((unsigned long)buf % 64 || fx_only) {
27301 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
27302 diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
27303 index b110fe6..d9c19f2 100644
27304 --- a/arch/x86/kvm/cpuid.c
27305 +++ b/arch/x86/kvm/cpuid.c
27306 @@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
27307 struct kvm_cpuid2 *cpuid,
27308 struct kvm_cpuid_entry2 __user *entries)
27309 {
27310 - int r;
27311 + int r, i;
27312
27313 r = -E2BIG;
27314 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
27315 goto out;
27316 r = -EFAULT;
27317 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
27318 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
27319 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
27320 goto out;
27321 + for (i = 0; i < cpuid->nent; ++i) {
27322 + struct kvm_cpuid_entry2 cpuid_entry;
27323 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
27324 + goto out;
27325 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
27326 + }
27327 vcpu->arch.cpuid_nent = cpuid->nent;
27328 kvm_apic_set_version(vcpu);
27329 kvm_x86_ops->cpuid_update(vcpu);
27330 @@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
27331 struct kvm_cpuid2 *cpuid,
27332 struct kvm_cpuid_entry2 __user *entries)
27333 {
27334 - int r;
27335 + int r, i;
27336
27337 r = -E2BIG;
27338 if (cpuid->nent < vcpu->arch.cpuid_nent)
27339 goto out;
27340 r = -EFAULT;
27341 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
27342 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
27343 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
27344 goto out;
27345 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
27346 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
27347 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
27348 + goto out;
27349 + }
27350 return 0;
27351
27352 out:
27353 diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
27354 index dec48bf..f4d21f7 100644
27355 --- a/arch/x86/kvm/lapic.c
27356 +++ b/arch/x86/kvm/lapic.c
27357 @@ -55,7 +55,7 @@
27358 #define APIC_BUS_CYCLE_NS 1
27359
27360 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
27361 -#define apic_debug(fmt, arg...)
27362 +#define apic_debug(fmt, arg...) do {} while (0)
27363
27364 #define APIC_LVT_NUM 6
27365 /* 14 is the version for Xeon and Pentium 8.4.8*/
27366 diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
27367 index ad75d77..a679d32 100644
27368 --- a/arch/x86/kvm/paging_tmpl.h
27369 +++ b/arch/x86/kvm/paging_tmpl.h
27370 @@ -331,7 +331,7 @@ retry_walk:
27371 if (unlikely(kvm_is_error_hva(host_addr)))
27372 goto error;
27373
27374 - ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
27375 + ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
27376 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
27377 goto error;
27378 walker->ptep_user[walker->level - 1] = ptep_user;
27379 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
27380 index c0bc803..6837a50 100644
27381 --- a/arch/x86/kvm/svm.c
27382 +++ b/arch/x86/kvm/svm.c
27383 @@ -3501,7 +3501,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
27384 int cpu = raw_smp_processor_id();
27385
27386 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
27387 +
27388 + pax_open_kernel();
27389 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
27390 + pax_close_kernel();
27391 +
27392 load_TR_desc();
27393 }
27394
27395 @@ -3902,6 +3906,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
27396 #endif
27397 #endif
27398
27399 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
27400 + __set_fs(current_thread_info()->addr_limit);
27401 +#endif
27402 +
27403 reload_tss(vcpu);
27404
27405 local_irq_disable();
27406 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
27407 index 2b2fce1..da76be4 100644
27408 --- a/arch/x86/kvm/vmx.c
27409 +++ b/arch/x86/kvm/vmx.c
27410 @@ -1316,12 +1316,12 @@ static void vmcs_write64(unsigned long field, u64 value)
27411 #endif
27412 }
27413
27414 -static void vmcs_clear_bits(unsigned long field, u32 mask)
27415 +static void vmcs_clear_bits(unsigned long field, unsigned long mask)
27416 {
27417 vmcs_writel(field, vmcs_readl(field) & ~mask);
27418 }
27419
27420 -static void vmcs_set_bits(unsigned long field, u32 mask)
27421 +static void vmcs_set_bits(unsigned long field, unsigned long mask)
27422 {
27423 vmcs_writel(field, vmcs_readl(field) | mask);
27424 }
27425 @@ -1522,7 +1522,11 @@ static void reload_tss(void)
27426 struct desc_struct *descs;
27427
27428 descs = (void *)gdt->address;
27429 +
27430 + pax_open_kernel();
27431 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
27432 + pax_close_kernel();
27433 +
27434 load_TR_desc();
27435 }
27436
27437 @@ -1746,6 +1750,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
27438 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
27439 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
27440
27441 +#ifdef CONFIG_PAX_PER_CPU_PGD
27442 + vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
27443 +#endif
27444 +
27445 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
27446 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
27447 vmx->loaded_vmcs->cpu = cpu;
27448 @@ -2037,7 +2045,7 @@ static void setup_msrs(struct vcpu_vmx *vmx)
27449 * reads and returns guest's timestamp counter "register"
27450 * guest_tsc = host_tsc + tsc_offset -- 21.3
27451 */
27452 -static u64 guest_read_tsc(void)
27453 +static u64 __intentional_overflow(-1) guest_read_tsc(void)
27454 {
27455 u64 host_tsc, tsc_offset;
27456
27457 @@ -2982,8 +2990,11 @@ static __init int hardware_setup(void)
27458 if (!cpu_has_vmx_flexpriority())
27459 flexpriority_enabled = 0;
27460
27461 - if (!cpu_has_vmx_tpr_shadow())
27462 - kvm_x86_ops->update_cr8_intercept = NULL;
27463 + if (!cpu_has_vmx_tpr_shadow()) {
27464 + pax_open_kernel();
27465 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
27466 + pax_close_kernel();
27467 + }
27468
27469 if (enable_ept && !cpu_has_vmx_ept_2m_page())
27470 kvm_disable_largepages();
27471 @@ -2994,13 +3005,15 @@ static __init int hardware_setup(void)
27472 if (!cpu_has_vmx_apicv())
27473 enable_apicv = 0;
27474
27475 + pax_open_kernel();
27476 if (enable_apicv)
27477 - kvm_x86_ops->update_cr8_intercept = NULL;
27478 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
27479 else {
27480 - kvm_x86_ops->hwapic_irr_update = NULL;
27481 - kvm_x86_ops->deliver_posted_interrupt = NULL;
27482 - kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
27483 + *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
27484 + *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
27485 + *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
27486 }
27487 + pax_close_kernel();
27488
27489 if (nested)
27490 nested_vmx_setup_ctls_msrs();
27491 @@ -4127,7 +4140,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
27492
27493 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
27494 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
27495 +
27496 +#ifndef CONFIG_PAX_PER_CPU_PGD
27497 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
27498 +#endif
27499
27500 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
27501 #ifdef CONFIG_X86_64
27502 @@ -4149,7 +4165,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
27503 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
27504 vmx->host_idt_base = dt.address;
27505
27506 - vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
27507 + vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
27508
27509 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
27510 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
27511 @@ -7191,6 +7207,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
27512 "jmp 2f \n\t"
27513 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
27514 "2: "
27515 +
27516 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27517 + "ljmp %[cs],$3f\n\t"
27518 + "3: "
27519 +#endif
27520 +
27521 /* Save guest registers, load host registers, keep flags */
27522 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
27523 "pop %0 \n\t"
27524 @@ -7243,6 +7265,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
27525 #endif
27526 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
27527 [wordsize]"i"(sizeof(ulong))
27528 +
27529 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27530 + ,[cs]"i"(__KERNEL_CS)
27531 +#endif
27532 +
27533 : "cc", "memory"
27534 #ifdef CONFIG_X86_64
27535 , "rax", "rbx", "rdi", "rsi"
27536 @@ -7256,7 +7283,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
27537 if (debugctlmsr)
27538 update_debugctlmsr(debugctlmsr);
27539
27540 -#ifndef CONFIG_X86_64
27541 +#ifdef CONFIG_X86_32
27542 /*
27543 * The sysexit path does not restore ds/es, so we must set them to
27544 * a reasonable value ourselves.
27545 @@ -7265,8 +7292,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
27546 * may be executed in interrupt context, which saves and restore segments
27547 * around it, nullifying its effect.
27548 */
27549 - loadsegment(ds, __USER_DS);
27550 - loadsegment(es, __USER_DS);
27551 + loadsegment(ds, __KERNEL_DS);
27552 + loadsegment(es, __KERNEL_DS);
27553 + loadsegment(ss, __KERNEL_DS);
27554 +
27555 +#ifdef CONFIG_PAX_KERNEXEC
27556 + loadsegment(fs, __KERNEL_PERCPU);
27557 +#endif
27558 +
27559 +#ifdef CONFIG_PAX_MEMORY_UDEREF
27560 + __set_fs(current_thread_info()->addr_limit);
27561 +#endif
27562 +
27563 #endif
27564
27565 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
27566 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
27567 index eb9b9c9..0f30b12 100644
27568 --- a/arch/x86/kvm/x86.c
27569 +++ b/arch/x86/kvm/x86.c
27570 @@ -1779,8 +1779,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
27571 {
27572 struct kvm *kvm = vcpu->kvm;
27573 int lm = is_long_mode(vcpu);
27574 - u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
27575 - : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
27576 + u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
27577 + : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
27578 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
27579 : kvm->arch.xen_hvm_config.blob_size_32;
27580 u32 page_num = data & ~PAGE_MASK;
27581 @@ -2663,6 +2663,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
27582 if (n < msr_list.nmsrs)
27583 goto out;
27584 r = -EFAULT;
27585 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
27586 + goto out;
27587 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
27588 num_msrs_to_save * sizeof(u32)))
27589 goto out;
27590 @@ -5461,7 +5463,7 @@ static struct notifier_block pvclock_gtod_notifier = {
27591 };
27592 #endif
27593
27594 -int kvm_arch_init(void *opaque)
27595 +int kvm_arch_init(const void *opaque)
27596 {
27597 int r;
27598 struct kvm_x86_ops *ops = opaque;
27599 diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
27600 index bdf8532..f63c587 100644
27601 --- a/arch/x86/lguest/boot.c
27602 +++ b/arch/x86/lguest/boot.c
27603 @@ -1206,9 +1206,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
27604 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
27605 * Launcher to reboot us.
27606 */
27607 -static void lguest_restart(char *reason)
27608 +static __noreturn void lguest_restart(char *reason)
27609 {
27610 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
27611 + BUG();
27612 }
27613
27614 /*G:050
27615 diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
27616 index 00933d5..3a64af9 100644
27617 --- a/arch/x86/lib/atomic64_386_32.S
27618 +++ b/arch/x86/lib/atomic64_386_32.S
27619 @@ -48,6 +48,10 @@ BEGIN(read)
27620 movl (v), %eax
27621 movl 4(v), %edx
27622 RET_ENDP
27623 +BEGIN(read_unchecked)
27624 + movl (v), %eax
27625 + movl 4(v), %edx
27626 +RET_ENDP
27627 #undef v
27628
27629 #define v %esi
27630 @@ -55,6 +59,10 @@ BEGIN(set)
27631 movl %ebx, (v)
27632 movl %ecx, 4(v)
27633 RET_ENDP
27634 +BEGIN(set_unchecked)
27635 + movl %ebx, (v)
27636 + movl %ecx, 4(v)
27637 +RET_ENDP
27638 #undef v
27639
27640 #define v %esi
27641 @@ -70,6 +78,20 @@ RET_ENDP
27642 BEGIN(add)
27643 addl %eax, (v)
27644 adcl %edx, 4(v)
27645 +
27646 +#ifdef CONFIG_PAX_REFCOUNT
27647 + jno 0f
27648 + subl %eax, (v)
27649 + sbbl %edx, 4(v)
27650 + int $4
27651 +0:
27652 + _ASM_EXTABLE(0b, 0b)
27653 +#endif
27654 +
27655 +RET_ENDP
27656 +BEGIN(add_unchecked)
27657 + addl %eax, (v)
27658 + adcl %edx, 4(v)
27659 RET_ENDP
27660 #undef v
27661
27662 @@ -77,6 +99,24 @@ RET_ENDP
27663 BEGIN(add_return)
27664 addl (v), %eax
27665 adcl 4(v), %edx
27666 +
27667 +#ifdef CONFIG_PAX_REFCOUNT
27668 + into
27669 +1234:
27670 + _ASM_EXTABLE(1234b, 2f)
27671 +#endif
27672 +
27673 + movl %eax, (v)
27674 + movl %edx, 4(v)
27675 +
27676 +#ifdef CONFIG_PAX_REFCOUNT
27677 +2:
27678 +#endif
27679 +
27680 +RET_ENDP
27681 +BEGIN(add_return_unchecked)
27682 + addl (v), %eax
27683 + adcl 4(v), %edx
27684 movl %eax, (v)
27685 movl %edx, 4(v)
27686 RET_ENDP
27687 @@ -86,6 +126,20 @@ RET_ENDP
27688 BEGIN(sub)
27689 subl %eax, (v)
27690 sbbl %edx, 4(v)
27691 +
27692 +#ifdef CONFIG_PAX_REFCOUNT
27693 + jno 0f
27694 + addl %eax, (v)
27695 + adcl %edx, 4(v)
27696 + int $4
27697 +0:
27698 + _ASM_EXTABLE(0b, 0b)
27699 +#endif
27700 +
27701 +RET_ENDP
27702 +BEGIN(sub_unchecked)
27703 + subl %eax, (v)
27704 + sbbl %edx, 4(v)
27705 RET_ENDP
27706 #undef v
27707
27708 @@ -96,6 +150,27 @@ BEGIN(sub_return)
27709 sbbl $0, %edx
27710 addl (v), %eax
27711 adcl 4(v), %edx
27712 +
27713 +#ifdef CONFIG_PAX_REFCOUNT
27714 + into
27715 +1234:
27716 + _ASM_EXTABLE(1234b, 2f)
27717 +#endif
27718 +
27719 + movl %eax, (v)
27720 + movl %edx, 4(v)
27721 +
27722 +#ifdef CONFIG_PAX_REFCOUNT
27723 +2:
27724 +#endif
27725 +
27726 +RET_ENDP
27727 +BEGIN(sub_return_unchecked)
27728 + negl %edx
27729 + negl %eax
27730 + sbbl $0, %edx
27731 + addl (v), %eax
27732 + adcl 4(v), %edx
27733 movl %eax, (v)
27734 movl %edx, 4(v)
27735 RET_ENDP
27736 @@ -105,6 +180,20 @@ RET_ENDP
27737 BEGIN(inc)
27738 addl $1, (v)
27739 adcl $0, 4(v)
27740 +
27741 +#ifdef CONFIG_PAX_REFCOUNT
27742 + jno 0f
27743 + subl $1, (v)
27744 + sbbl $0, 4(v)
27745 + int $4
27746 +0:
27747 + _ASM_EXTABLE(0b, 0b)
27748 +#endif
27749 +
27750 +RET_ENDP
27751 +BEGIN(inc_unchecked)
27752 + addl $1, (v)
27753 + adcl $0, 4(v)
27754 RET_ENDP
27755 #undef v
27756
27757 @@ -114,6 +203,26 @@ BEGIN(inc_return)
27758 movl 4(v), %edx
27759 addl $1, %eax
27760 adcl $0, %edx
27761 +
27762 +#ifdef CONFIG_PAX_REFCOUNT
27763 + into
27764 +1234:
27765 + _ASM_EXTABLE(1234b, 2f)
27766 +#endif
27767 +
27768 + movl %eax, (v)
27769 + movl %edx, 4(v)
27770 +
27771 +#ifdef CONFIG_PAX_REFCOUNT
27772 +2:
27773 +#endif
27774 +
27775 +RET_ENDP
27776 +BEGIN(inc_return_unchecked)
27777 + movl (v), %eax
27778 + movl 4(v), %edx
27779 + addl $1, %eax
27780 + adcl $0, %edx
27781 movl %eax, (v)
27782 movl %edx, 4(v)
27783 RET_ENDP
27784 @@ -123,6 +232,20 @@ RET_ENDP
27785 BEGIN(dec)
27786 subl $1, (v)
27787 sbbl $0, 4(v)
27788 +
27789 +#ifdef CONFIG_PAX_REFCOUNT
27790 + jno 0f
27791 + addl $1, (v)
27792 + adcl $0, 4(v)
27793 + int $4
27794 +0:
27795 + _ASM_EXTABLE(0b, 0b)
27796 +#endif
27797 +
27798 +RET_ENDP
27799 +BEGIN(dec_unchecked)
27800 + subl $1, (v)
27801 + sbbl $0, 4(v)
27802 RET_ENDP
27803 #undef v
27804
27805 @@ -132,6 +255,26 @@ BEGIN(dec_return)
27806 movl 4(v), %edx
27807 subl $1, %eax
27808 sbbl $0, %edx
27809 +
27810 +#ifdef CONFIG_PAX_REFCOUNT
27811 + into
27812 +1234:
27813 + _ASM_EXTABLE(1234b, 2f)
27814 +#endif
27815 +
27816 + movl %eax, (v)
27817 + movl %edx, 4(v)
27818 +
27819 +#ifdef CONFIG_PAX_REFCOUNT
27820 +2:
27821 +#endif
27822 +
27823 +RET_ENDP
27824 +BEGIN(dec_return_unchecked)
27825 + movl (v), %eax
27826 + movl 4(v), %edx
27827 + subl $1, %eax
27828 + sbbl $0, %edx
27829 movl %eax, (v)
27830 movl %edx, 4(v)
27831 RET_ENDP
27832 @@ -143,6 +286,13 @@ BEGIN(add_unless)
27833 adcl %edx, %edi
27834 addl (v), %eax
27835 adcl 4(v), %edx
27836 +
27837 +#ifdef CONFIG_PAX_REFCOUNT
27838 + into
27839 +1234:
27840 + _ASM_EXTABLE(1234b, 2f)
27841 +#endif
27842 +
27843 cmpl %eax, %ecx
27844 je 3f
27845 1:
27846 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
27847 1:
27848 addl $1, %eax
27849 adcl $0, %edx
27850 +
27851 +#ifdef CONFIG_PAX_REFCOUNT
27852 + into
27853 +1234:
27854 + _ASM_EXTABLE(1234b, 2f)
27855 +#endif
27856 +
27857 movl %eax, (v)
27858 movl %edx, 4(v)
27859 movl $1, %eax
27860 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
27861 movl 4(v), %edx
27862 subl $1, %eax
27863 sbbl $0, %edx
27864 +
27865 +#ifdef CONFIG_PAX_REFCOUNT
27866 + into
27867 +1234:
27868 + _ASM_EXTABLE(1234b, 1f)
27869 +#endif
27870 +
27871 js 1f
27872 movl %eax, (v)
27873 movl %edx, 4(v)
27874 diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
27875 index f5cc9eb..51fa319 100644
27876 --- a/arch/x86/lib/atomic64_cx8_32.S
27877 +++ b/arch/x86/lib/atomic64_cx8_32.S
27878 @@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
27879 CFI_STARTPROC
27880
27881 read64 %ecx
27882 + pax_force_retaddr
27883 ret
27884 CFI_ENDPROC
27885 ENDPROC(atomic64_read_cx8)
27886
27887 +ENTRY(atomic64_read_unchecked_cx8)
27888 + CFI_STARTPROC
27889 +
27890 + read64 %ecx
27891 + pax_force_retaddr
27892 + ret
27893 + CFI_ENDPROC
27894 +ENDPROC(atomic64_read_unchecked_cx8)
27895 +
27896 ENTRY(atomic64_set_cx8)
27897 CFI_STARTPROC
27898
27899 @@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
27900 cmpxchg8b (%esi)
27901 jne 1b
27902
27903 + pax_force_retaddr
27904 ret
27905 CFI_ENDPROC
27906 ENDPROC(atomic64_set_cx8)
27907
27908 +ENTRY(atomic64_set_unchecked_cx8)
27909 + CFI_STARTPROC
27910 +
27911 +1:
27912 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
27913 + * are atomic on 586 and newer */
27914 + cmpxchg8b (%esi)
27915 + jne 1b
27916 +
27917 + pax_force_retaddr
27918 + ret
27919 + CFI_ENDPROC
27920 +ENDPROC(atomic64_set_unchecked_cx8)
27921 +
27922 ENTRY(atomic64_xchg_cx8)
27923 CFI_STARTPROC
27924
27925 @@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
27926 cmpxchg8b (%esi)
27927 jne 1b
27928
27929 + pax_force_retaddr
27930 ret
27931 CFI_ENDPROC
27932 ENDPROC(atomic64_xchg_cx8)
27933
27934 -.macro addsub_return func ins insc
27935 -ENTRY(atomic64_\func\()_return_cx8)
27936 +.macro addsub_return func ins insc unchecked=""
27937 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
27938 CFI_STARTPROC
27939 SAVE ebp
27940 SAVE ebx
27941 @@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
27942 movl %edx, %ecx
27943 \ins\()l %esi, %ebx
27944 \insc\()l %edi, %ecx
27945 +
27946 +.ifb \unchecked
27947 +#ifdef CONFIG_PAX_REFCOUNT
27948 + into
27949 +2:
27950 + _ASM_EXTABLE(2b, 3f)
27951 +#endif
27952 +.endif
27953 +
27954 LOCK_PREFIX
27955 cmpxchg8b (%ebp)
27956 jne 1b
27957 -
27958 -10:
27959 movl %ebx, %eax
27960 movl %ecx, %edx
27961 +
27962 +.ifb \unchecked
27963 +#ifdef CONFIG_PAX_REFCOUNT
27964 +3:
27965 +#endif
27966 +.endif
27967 +
27968 RESTORE edi
27969 RESTORE esi
27970 RESTORE ebx
27971 RESTORE ebp
27972 + pax_force_retaddr
27973 ret
27974 CFI_ENDPROC
27975 -ENDPROC(atomic64_\func\()_return_cx8)
27976 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
27977 .endm
27978
27979 addsub_return add add adc
27980 addsub_return sub sub sbb
27981 +addsub_return add add adc _unchecked
27982 +addsub_return sub sub sbb _unchecked
27983
27984 -.macro incdec_return func ins insc
27985 -ENTRY(atomic64_\func\()_return_cx8)
27986 +.macro incdec_return func ins insc unchecked=""
27987 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
27988 CFI_STARTPROC
27989 SAVE ebx
27990
27991 @@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
27992 movl %edx, %ecx
27993 \ins\()l $1, %ebx
27994 \insc\()l $0, %ecx
27995 +
27996 +.ifb \unchecked
27997 +#ifdef CONFIG_PAX_REFCOUNT
27998 + into
27999 +2:
28000 + _ASM_EXTABLE(2b, 3f)
28001 +#endif
28002 +.endif
28003 +
28004 LOCK_PREFIX
28005 cmpxchg8b (%esi)
28006 jne 1b
28007
28008 -10:
28009 movl %ebx, %eax
28010 movl %ecx, %edx
28011 +
28012 +.ifb \unchecked
28013 +#ifdef CONFIG_PAX_REFCOUNT
28014 +3:
28015 +#endif
28016 +.endif
28017 +
28018 RESTORE ebx
28019 + pax_force_retaddr
28020 ret
28021 CFI_ENDPROC
28022 -ENDPROC(atomic64_\func\()_return_cx8)
28023 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
28024 .endm
28025
28026 incdec_return inc add adc
28027 incdec_return dec sub sbb
28028 +incdec_return inc add adc _unchecked
28029 +incdec_return dec sub sbb _unchecked
28030
28031 ENTRY(atomic64_dec_if_positive_cx8)
28032 CFI_STARTPROC
28033 @@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
28034 movl %edx, %ecx
28035 subl $1, %ebx
28036 sbb $0, %ecx
28037 +
28038 +#ifdef CONFIG_PAX_REFCOUNT
28039 + into
28040 +1234:
28041 + _ASM_EXTABLE(1234b, 2f)
28042 +#endif
28043 +
28044 js 2f
28045 LOCK_PREFIX
28046 cmpxchg8b (%esi)
28047 @@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
28048 movl %ebx, %eax
28049 movl %ecx, %edx
28050 RESTORE ebx
28051 + pax_force_retaddr
28052 ret
28053 CFI_ENDPROC
28054 ENDPROC(atomic64_dec_if_positive_cx8)
28055 @@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
28056 movl %edx, %ecx
28057 addl %ebp, %ebx
28058 adcl %edi, %ecx
28059 +
28060 +#ifdef CONFIG_PAX_REFCOUNT
28061 + into
28062 +1234:
28063 + _ASM_EXTABLE(1234b, 3f)
28064 +#endif
28065 +
28066 LOCK_PREFIX
28067 cmpxchg8b (%esi)
28068 jne 1b
28069 @@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
28070 CFI_ADJUST_CFA_OFFSET -8
28071 RESTORE ebx
28072 RESTORE ebp
28073 + pax_force_retaddr
28074 ret
28075 4:
28076 cmpl %edx, 4(%esp)
28077 @@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
28078 xorl %ecx, %ecx
28079 addl $1, %ebx
28080 adcl %edx, %ecx
28081 +
28082 +#ifdef CONFIG_PAX_REFCOUNT
28083 + into
28084 +1234:
28085 + _ASM_EXTABLE(1234b, 3f)
28086 +#endif
28087 +
28088 LOCK_PREFIX
28089 cmpxchg8b (%esi)
28090 jne 1b
28091 @@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
28092 movl $1, %eax
28093 3:
28094 RESTORE ebx
28095 + pax_force_retaddr
28096 ret
28097 CFI_ENDPROC
28098 ENDPROC(atomic64_inc_not_zero_cx8)
28099 diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
28100 index e78b8ee..7e173a8 100644
28101 --- a/arch/x86/lib/checksum_32.S
28102 +++ b/arch/x86/lib/checksum_32.S
28103 @@ -29,7 +29,8 @@
28104 #include <asm/dwarf2.h>
28105 #include <asm/errno.h>
28106 #include <asm/asm.h>
28107 -
28108 +#include <asm/segment.h>
28109 +
28110 /*
28111 * computes a partial checksum, e.g. for TCP/UDP fragments
28112 */
28113 @@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
28114
28115 #define ARGBASE 16
28116 #define FP 12
28117 -
28118 -ENTRY(csum_partial_copy_generic)
28119 +
28120 +ENTRY(csum_partial_copy_generic_to_user)
28121 CFI_STARTPROC
28122 +
28123 +#ifdef CONFIG_PAX_MEMORY_UDEREF
28124 + pushl_cfi %gs
28125 + popl_cfi %es
28126 + jmp csum_partial_copy_generic
28127 +#endif
28128 +
28129 +ENTRY(csum_partial_copy_generic_from_user)
28130 +
28131 +#ifdef CONFIG_PAX_MEMORY_UDEREF
28132 + pushl_cfi %gs
28133 + popl_cfi %ds
28134 +#endif
28135 +
28136 +ENTRY(csum_partial_copy_generic)
28137 subl $4,%esp
28138 CFI_ADJUST_CFA_OFFSET 4
28139 pushl_cfi %edi
28140 @@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
28141 jmp 4f
28142 SRC(1: movw (%esi), %bx )
28143 addl $2, %esi
28144 -DST( movw %bx, (%edi) )
28145 +DST( movw %bx, %es:(%edi) )
28146 addl $2, %edi
28147 addw %bx, %ax
28148 adcl $0, %eax
28149 @@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
28150 SRC(1: movl (%esi), %ebx )
28151 SRC( movl 4(%esi), %edx )
28152 adcl %ebx, %eax
28153 -DST( movl %ebx, (%edi) )
28154 +DST( movl %ebx, %es:(%edi) )
28155 adcl %edx, %eax
28156 -DST( movl %edx, 4(%edi) )
28157 +DST( movl %edx, %es:4(%edi) )
28158
28159 SRC( movl 8(%esi), %ebx )
28160 SRC( movl 12(%esi), %edx )
28161 adcl %ebx, %eax
28162 -DST( movl %ebx, 8(%edi) )
28163 +DST( movl %ebx, %es:8(%edi) )
28164 adcl %edx, %eax
28165 -DST( movl %edx, 12(%edi) )
28166 +DST( movl %edx, %es:12(%edi) )
28167
28168 SRC( movl 16(%esi), %ebx )
28169 SRC( movl 20(%esi), %edx )
28170 adcl %ebx, %eax
28171 -DST( movl %ebx, 16(%edi) )
28172 +DST( movl %ebx, %es:16(%edi) )
28173 adcl %edx, %eax
28174 -DST( movl %edx, 20(%edi) )
28175 +DST( movl %edx, %es:20(%edi) )
28176
28177 SRC( movl 24(%esi), %ebx )
28178 SRC( movl 28(%esi), %edx )
28179 adcl %ebx, %eax
28180 -DST( movl %ebx, 24(%edi) )
28181 +DST( movl %ebx, %es:24(%edi) )
28182 adcl %edx, %eax
28183 -DST( movl %edx, 28(%edi) )
28184 +DST( movl %edx, %es:28(%edi) )
28185
28186 lea 32(%esi), %esi
28187 lea 32(%edi), %edi
28188 @@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
28189 shrl $2, %edx # This clears CF
28190 SRC(3: movl (%esi), %ebx )
28191 adcl %ebx, %eax
28192 -DST( movl %ebx, (%edi) )
28193 +DST( movl %ebx, %es:(%edi) )
28194 lea 4(%esi), %esi
28195 lea 4(%edi), %edi
28196 dec %edx
28197 @@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
28198 jb 5f
28199 SRC( movw (%esi), %cx )
28200 leal 2(%esi), %esi
28201 -DST( movw %cx, (%edi) )
28202 +DST( movw %cx, %es:(%edi) )
28203 leal 2(%edi), %edi
28204 je 6f
28205 shll $16,%ecx
28206 SRC(5: movb (%esi), %cl )
28207 -DST( movb %cl, (%edi) )
28208 +DST( movb %cl, %es:(%edi) )
28209 6: addl %ecx, %eax
28210 adcl $0, %eax
28211 7:
28212 @@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
28213
28214 6001:
28215 movl ARGBASE+20(%esp), %ebx # src_err_ptr
28216 - movl $-EFAULT, (%ebx)
28217 + movl $-EFAULT, %ss:(%ebx)
28218
28219 # zero the complete destination - computing the rest
28220 # is too much work
28221 @@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
28222
28223 6002:
28224 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
28225 - movl $-EFAULT,(%ebx)
28226 + movl $-EFAULT,%ss:(%ebx)
28227 jmp 5000b
28228
28229 .previous
28230
28231 + pushl_cfi %ss
28232 + popl_cfi %ds
28233 + pushl_cfi %ss
28234 + popl_cfi %es
28235 popl_cfi %ebx
28236 CFI_RESTORE ebx
28237 popl_cfi %esi
28238 @@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
28239 popl_cfi %ecx # equivalent to addl $4,%esp
28240 ret
28241 CFI_ENDPROC
28242 -ENDPROC(csum_partial_copy_generic)
28243 +ENDPROC(csum_partial_copy_generic_to_user)
28244
28245 #else
28246
28247 /* Version for PentiumII/PPro */
28248
28249 #define ROUND1(x) \
28250 + nop; nop; nop; \
28251 SRC(movl x(%esi), %ebx ) ; \
28252 addl %ebx, %eax ; \
28253 - DST(movl %ebx, x(%edi) ) ;
28254 + DST(movl %ebx, %es:x(%edi)) ;
28255
28256 #define ROUND(x) \
28257 + nop; nop; nop; \
28258 SRC(movl x(%esi), %ebx ) ; \
28259 adcl %ebx, %eax ; \
28260 - DST(movl %ebx, x(%edi) ) ;
28261 + DST(movl %ebx, %es:x(%edi)) ;
28262
28263 #define ARGBASE 12
28264 -
28265 -ENTRY(csum_partial_copy_generic)
28266 +
28267 +ENTRY(csum_partial_copy_generic_to_user)
28268 CFI_STARTPROC
28269 +
28270 +#ifdef CONFIG_PAX_MEMORY_UDEREF
28271 + pushl_cfi %gs
28272 + popl_cfi %es
28273 + jmp csum_partial_copy_generic
28274 +#endif
28275 +
28276 +ENTRY(csum_partial_copy_generic_from_user)
28277 +
28278 +#ifdef CONFIG_PAX_MEMORY_UDEREF
28279 + pushl_cfi %gs
28280 + popl_cfi %ds
28281 +#endif
28282 +
28283 +ENTRY(csum_partial_copy_generic)
28284 pushl_cfi %ebx
28285 CFI_REL_OFFSET ebx, 0
28286 pushl_cfi %edi
28287 @@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
28288 subl %ebx, %edi
28289 lea -1(%esi),%edx
28290 andl $-32,%edx
28291 - lea 3f(%ebx,%ebx), %ebx
28292 + lea 3f(%ebx,%ebx,2), %ebx
28293 testl %esi, %esi
28294 jmp *%ebx
28295 1: addl $64,%esi
28296 @@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
28297 jb 5f
28298 SRC( movw (%esi), %dx )
28299 leal 2(%esi), %esi
28300 -DST( movw %dx, (%edi) )
28301 +DST( movw %dx, %es:(%edi) )
28302 leal 2(%edi), %edi
28303 je 6f
28304 shll $16,%edx
28305 5:
28306 SRC( movb (%esi), %dl )
28307 -DST( movb %dl, (%edi) )
28308 +DST( movb %dl, %es:(%edi) )
28309 6: addl %edx, %eax
28310 adcl $0, %eax
28311 7:
28312 .section .fixup, "ax"
28313 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
28314 - movl $-EFAULT, (%ebx)
28315 + movl $-EFAULT, %ss:(%ebx)
28316 # zero the complete destination (computing the rest is too much work)
28317 movl ARGBASE+8(%esp),%edi # dst
28318 movl ARGBASE+12(%esp),%ecx # len
28319 @@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
28320 rep; stosb
28321 jmp 7b
28322 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
28323 - movl $-EFAULT, (%ebx)
28324 + movl $-EFAULT, %ss:(%ebx)
28325 jmp 7b
28326 .previous
28327
28328 +#ifdef CONFIG_PAX_MEMORY_UDEREF
28329 + pushl_cfi %ss
28330 + popl_cfi %ds
28331 + pushl_cfi %ss
28332 + popl_cfi %es
28333 +#endif
28334 +
28335 popl_cfi %esi
28336 CFI_RESTORE esi
28337 popl_cfi %edi
28338 @@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
28339 CFI_RESTORE ebx
28340 ret
28341 CFI_ENDPROC
28342 -ENDPROC(csum_partial_copy_generic)
28343 +ENDPROC(csum_partial_copy_generic_to_user)
28344
28345 #undef ROUND
28346 #undef ROUND1
28347 diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
28348 index f2145cf..cea889d 100644
28349 --- a/arch/x86/lib/clear_page_64.S
28350 +++ b/arch/x86/lib/clear_page_64.S
28351 @@ -11,6 +11,7 @@ ENTRY(clear_page_c)
28352 movl $4096/8,%ecx
28353 xorl %eax,%eax
28354 rep stosq
28355 + pax_force_retaddr
28356 ret
28357 CFI_ENDPROC
28358 ENDPROC(clear_page_c)
28359 @@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
28360 movl $4096,%ecx
28361 xorl %eax,%eax
28362 rep stosb
28363 + pax_force_retaddr
28364 ret
28365 CFI_ENDPROC
28366 ENDPROC(clear_page_c_e)
28367 @@ -43,6 +45,7 @@ ENTRY(clear_page)
28368 leaq 64(%rdi),%rdi
28369 jnz .Lloop
28370 nop
28371 + pax_force_retaddr
28372 ret
28373 CFI_ENDPROC
28374 .Lclear_page_end:
28375 @@ -58,7 +61,7 @@ ENDPROC(clear_page)
28376
28377 #include <asm/cpufeature.h>
28378
28379 - .section .altinstr_replacement,"ax"
28380 + .section .altinstr_replacement,"a"
28381 1: .byte 0xeb /* jmp <disp8> */
28382 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
28383 2: .byte 0xeb /* jmp <disp8> */
28384 diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
28385 index 1e572c5..2a162cd 100644
28386 --- a/arch/x86/lib/cmpxchg16b_emu.S
28387 +++ b/arch/x86/lib/cmpxchg16b_emu.S
28388 @@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
28389
28390 popf
28391 mov $1, %al
28392 + pax_force_retaddr
28393 ret
28394
28395 not_same:
28396 popf
28397 xor %al,%al
28398 + pax_force_retaddr
28399 ret
28400
28401 CFI_ENDPROC
28402 diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
28403 index 176cca6..e0d658e 100644
28404 --- a/arch/x86/lib/copy_page_64.S
28405 +++ b/arch/x86/lib/copy_page_64.S
28406 @@ -9,6 +9,7 @@ copy_page_rep:
28407 CFI_STARTPROC
28408 movl $4096/8, %ecx
28409 rep movsq
28410 + pax_force_retaddr
28411 ret
28412 CFI_ENDPROC
28413 ENDPROC(copy_page_rep)
28414 @@ -24,8 +25,8 @@ ENTRY(copy_page)
28415 CFI_ADJUST_CFA_OFFSET 2*8
28416 movq %rbx, (%rsp)
28417 CFI_REL_OFFSET rbx, 0
28418 - movq %r12, 1*8(%rsp)
28419 - CFI_REL_OFFSET r12, 1*8
28420 + movq %r13, 1*8(%rsp)
28421 + CFI_REL_OFFSET r13, 1*8
28422
28423 movl $(4096/64)-5, %ecx
28424 .p2align 4
28425 @@ -38,7 +39,7 @@ ENTRY(copy_page)
28426 movq 0x8*4(%rsi), %r9
28427 movq 0x8*5(%rsi), %r10
28428 movq 0x8*6(%rsi), %r11
28429 - movq 0x8*7(%rsi), %r12
28430 + movq 0x8*7(%rsi), %r13
28431
28432 prefetcht0 5*64(%rsi)
28433
28434 @@ -49,7 +50,7 @@ ENTRY(copy_page)
28435 movq %r9, 0x8*4(%rdi)
28436 movq %r10, 0x8*5(%rdi)
28437 movq %r11, 0x8*6(%rdi)
28438 - movq %r12, 0x8*7(%rdi)
28439 + movq %r13, 0x8*7(%rdi)
28440
28441 leaq 64 (%rsi), %rsi
28442 leaq 64 (%rdi), %rdi
28443 @@ -68,7 +69,7 @@ ENTRY(copy_page)
28444 movq 0x8*4(%rsi), %r9
28445 movq 0x8*5(%rsi), %r10
28446 movq 0x8*6(%rsi), %r11
28447 - movq 0x8*7(%rsi), %r12
28448 + movq 0x8*7(%rsi), %r13
28449
28450 movq %rax, 0x8*0(%rdi)
28451 movq %rbx, 0x8*1(%rdi)
28452 @@ -77,7 +78,7 @@ ENTRY(copy_page)
28453 movq %r9, 0x8*4(%rdi)
28454 movq %r10, 0x8*5(%rdi)
28455 movq %r11, 0x8*6(%rdi)
28456 - movq %r12, 0x8*7(%rdi)
28457 + movq %r13, 0x8*7(%rdi)
28458
28459 leaq 64(%rdi), %rdi
28460 leaq 64(%rsi), %rsi
28461 @@ -85,10 +86,11 @@ ENTRY(copy_page)
28462
28463 movq (%rsp), %rbx
28464 CFI_RESTORE rbx
28465 - movq 1*8(%rsp), %r12
28466 - CFI_RESTORE r12
28467 + movq 1*8(%rsp), %r13
28468 + CFI_RESTORE r13
28469 addq $2*8, %rsp
28470 CFI_ADJUST_CFA_OFFSET -2*8
28471 + pax_force_retaddr
28472 ret
28473 .Lcopy_page_end:
28474 CFI_ENDPROC
28475 @@ -99,7 +101,7 @@ ENDPROC(copy_page)
28476
28477 #include <asm/cpufeature.h>
28478
28479 - .section .altinstr_replacement,"ax"
28480 + .section .altinstr_replacement,"a"
28481 1: .byte 0xeb /* jmp <disp8> */
28482 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
28483 2:
28484 diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
28485 index a30ca15..407412b 100644
28486 --- a/arch/x86/lib/copy_user_64.S
28487 +++ b/arch/x86/lib/copy_user_64.S
28488 @@ -18,31 +18,7 @@
28489 #include <asm/alternative-asm.h>
28490 #include <asm/asm.h>
28491 #include <asm/smap.h>
28492 -
28493 -/*
28494 - * By placing feature2 after feature1 in altinstructions section, we logically
28495 - * implement:
28496 - * If CPU has feature2, jmp to alt2 is used
28497 - * else if CPU has feature1, jmp to alt1 is used
28498 - * else jmp to orig is used.
28499 - */
28500 - .macro ALTERNATIVE_JUMP feature1,feature2,orig,alt1,alt2
28501 -0:
28502 - .byte 0xe9 /* 32bit jump */
28503 - .long \orig-1f /* by default jump to orig */
28504 -1:
28505 - .section .altinstr_replacement,"ax"
28506 -2: .byte 0xe9 /* near jump with 32bit immediate */
28507 - .long \alt1-1b /* offset */ /* or alternatively to alt1 */
28508 -3: .byte 0xe9 /* near jump with 32bit immediate */
28509 - .long \alt2-1b /* offset */ /* or alternatively to alt2 */
28510 - .previous
28511 -
28512 - .section .altinstructions,"a"
28513 - altinstruction_entry 0b,2b,\feature1,5,5
28514 - altinstruction_entry 0b,3b,\feature2,5,5
28515 - .previous
28516 - .endm
28517 +#include <asm/pgtable.h>
28518
28519 .macro ALIGN_DESTINATION
28520 #ifdef FIX_ALIGNMENT
28521 @@ -70,52 +46,6 @@
28522 #endif
28523 .endm
28524
28525 -/* Standard copy_to_user with segment limit checking */
28526 -ENTRY(_copy_to_user)
28527 - CFI_STARTPROC
28528 - GET_THREAD_INFO(%rax)
28529 - movq %rdi,%rcx
28530 - addq %rdx,%rcx
28531 - jc bad_to_user
28532 - cmpq TI_addr_limit(%rax),%rcx
28533 - ja bad_to_user
28534 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
28535 - copy_user_generic_unrolled,copy_user_generic_string, \
28536 - copy_user_enhanced_fast_string
28537 - CFI_ENDPROC
28538 -ENDPROC(_copy_to_user)
28539 -
28540 -/* Standard copy_from_user with segment limit checking */
28541 -ENTRY(_copy_from_user)
28542 - CFI_STARTPROC
28543 - GET_THREAD_INFO(%rax)
28544 - movq %rsi,%rcx
28545 - addq %rdx,%rcx
28546 - jc bad_from_user
28547 - cmpq TI_addr_limit(%rax),%rcx
28548 - ja bad_from_user
28549 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
28550 - copy_user_generic_unrolled,copy_user_generic_string, \
28551 - copy_user_enhanced_fast_string
28552 - CFI_ENDPROC
28553 -ENDPROC(_copy_from_user)
28554 -
28555 - .section .fixup,"ax"
28556 - /* must zero dest */
28557 -ENTRY(bad_from_user)
28558 -bad_from_user:
28559 - CFI_STARTPROC
28560 - movl %edx,%ecx
28561 - xorl %eax,%eax
28562 - rep
28563 - stosb
28564 -bad_to_user:
28565 - movl %edx,%eax
28566 - ret
28567 - CFI_ENDPROC
28568 -ENDPROC(bad_from_user)
28569 - .previous
28570 -
28571 /*
28572 * copy_user_generic_unrolled - memory copy with exception handling.
28573 * This version is for CPUs like P4 that don't have efficient micro
28574 @@ -131,6 +61,7 @@ ENDPROC(bad_from_user)
28575 */
28576 ENTRY(copy_user_generic_unrolled)
28577 CFI_STARTPROC
28578 + ASM_PAX_OPEN_USERLAND
28579 ASM_STAC
28580 cmpl $8,%edx
28581 jb 20f /* less then 8 bytes, go to byte copy loop */
28582 @@ -180,6 +111,8 @@ ENTRY(copy_user_generic_unrolled)
28583 jnz 21b
28584 23: xor %eax,%eax
28585 ASM_CLAC
28586 + ASM_PAX_CLOSE_USERLAND
28587 + pax_force_retaddr
28588 ret
28589
28590 .section .fixup,"ax"
28591 @@ -235,6 +168,7 @@ ENDPROC(copy_user_generic_unrolled)
28592 */
28593 ENTRY(copy_user_generic_string)
28594 CFI_STARTPROC
28595 + ASM_PAX_OPEN_USERLAND
28596 ASM_STAC
28597 andl %edx,%edx
28598 jz 4f
28599 @@ -251,6 +185,8 @@ ENTRY(copy_user_generic_string)
28600 movsb
28601 4: xorl %eax,%eax
28602 ASM_CLAC
28603 + ASM_PAX_CLOSE_USERLAND
28604 + pax_force_retaddr
28605 ret
28606
28607 .section .fixup,"ax"
28608 @@ -278,6 +214,7 @@ ENDPROC(copy_user_generic_string)
28609 */
28610 ENTRY(copy_user_enhanced_fast_string)
28611 CFI_STARTPROC
28612 + ASM_PAX_OPEN_USERLAND
28613 ASM_STAC
28614 andl %edx,%edx
28615 jz 2f
28616 @@ -286,6 +223,8 @@ ENTRY(copy_user_enhanced_fast_string)
28617 movsb
28618 2: xorl %eax,%eax
28619 ASM_CLAC
28620 + ASM_PAX_CLOSE_USERLAND
28621 + pax_force_retaddr
28622 ret
28623
28624 .section .fixup,"ax"
28625 diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
28626 index 6a4f43c..c70fb52 100644
28627 --- a/arch/x86/lib/copy_user_nocache_64.S
28628 +++ b/arch/x86/lib/copy_user_nocache_64.S
28629 @@ -8,6 +8,7 @@
28630
28631 #include <linux/linkage.h>
28632 #include <asm/dwarf2.h>
28633 +#include <asm/alternative-asm.h>
28634
28635 #define FIX_ALIGNMENT 1
28636
28637 @@ -16,6 +17,7 @@
28638 #include <asm/thread_info.h>
28639 #include <asm/asm.h>
28640 #include <asm/smap.h>
28641 +#include <asm/pgtable.h>
28642
28643 .macro ALIGN_DESTINATION
28644 #ifdef FIX_ALIGNMENT
28645 @@ -49,6 +51,16 @@
28646 */
28647 ENTRY(__copy_user_nocache)
28648 CFI_STARTPROC
28649 +
28650 +#ifdef CONFIG_PAX_MEMORY_UDEREF
28651 + mov pax_user_shadow_base,%rcx
28652 + cmp %rcx,%rsi
28653 + jae 1f
28654 + add %rcx,%rsi
28655 +1:
28656 +#endif
28657 +
28658 + ASM_PAX_OPEN_USERLAND
28659 ASM_STAC
28660 cmpl $8,%edx
28661 jb 20f /* less then 8 bytes, go to byte copy loop */
28662 @@ -98,7 +110,9 @@ ENTRY(__copy_user_nocache)
28663 jnz 21b
28664 23: xorl %eax,%eax
28665 ASM_CLAC
28666 + ASM_PAX_CLOSE_USERLAND
28667 sfence
28668 + pax_force_retaddr
28669 ret
28670
28671 .section .fixup,"ax"
28672 diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
28673 index 2419d5f..fe52d0e 100644
28674 --- a/arch/x86/lib/csum-copy_64.S
28675 +++ b/arch/x86/lib/csum-copy_64.S
28676 @@ -9,6 +9,7 @@
28677 #include <asm/dwarf2.h>
28678 #include <asm/errno.h>
28679 #include <asm/asm.h>
28680 +#include <asm/alternative-asm.h>
28681
28682 /*
28683 * Checksum copy with exception handling.
28684 @@ -56,8 +57,8 @@ ENTRY(csum_partial_copy_generic)
28685 CFI_ADJUST_CFA_OFFSET 7*8
28686 movq %rbx, 2*8(%rsp)
28687 CFI_REL_OFFSET rbx, 2*8
28688 - movq %r12, 3*8(%rsp)
28689 - CFI_REL_OFFSET r12, 3*8
28690 + movq %r15, 3*8(%rsp)
28691 + CFI_REL_OFFSET r15, 3*8
28692 movq %r14, 4*8(%rsp)
28693 CFI_REL_OFFSET r14, 4*8
28694 movq %r13, 5*8(%rsp)
28695 @@ -72,16 +73,16 @@ ENTRY(csum_partial_copy_generic)
28696 movl %edx, %ecx
28697
28698 xorl %r9d, %r9d
28699 - movq %rcx, %r12
28700 + movq %rcx, %r15
28701
28702 - shrq $6, %r12
28703 + shrq $6, %r15
28704 jz .Lhandle_tail /* < 64 */
28705
28706 clc
28707
28708 /* main loop. clear in 64 byte blocks */
28709 /* r9: zero, r8: temp2, rbx: temp1, rax: sum, rcx: saved length */
28710 - /* r11: temp3, rdx: temp4, r12 loopcnt */
28711 + /* r11: temp3, rdx: temp4, r15 loopcnt */
28712 /* r10: temp5, rbp: temp6, r14 temp7, r13 temp8 */
28713 .p2align 4
28714 .Lloop:
28715 @@ -115,7 +116,7 @@ ENTRY(csum_partial_copy_generic)
28716 adcq %r14, %rax
28717 adcq %r13, %rax
28718
28719 - decl %r12d
28720 + decl %r15d
28721
28722 dest
28723 movq %rbx, (%rsi)
28724 @@ -210,8 +211,8 @@ ENTRY(csum_partial_copy_generic)
28725 .Lende:
28726 movq 2*8(%rsp), %rbx
28727 CFI_RESTORE rbx
28728 - movq 3*8(%rsp), %r12
28729 - CFI_RESTORE r12
28730 + movq 3*8(%rsp), %r15
28731 + CFI_RESTORE r15
28732 movq 4*8(%rsp), %r14
28733 CFI_RESTORE r14
28734 movq 5*8(%rsp), %r13
28735 @@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
28736 CFI_RESTORE rbp
28737 addq $7*8, %rsp
28738 CFI_ADJUST_CFA_OFFSET -7*8
28739 + pax_force_retaddr
28740 ret
28741 CFI_RESTORE_STATE
28742
28743 diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
28744 index 7609e0e..b449b98 100644
28745 --- a/arch/x86/lib/csum-wrappers_64.c
28746 +++ b/arch/x86/lib/csum-wrappers_64.c
28747 @@ -53,10 +53,12 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
28748 len -= 2;
28749 }
28750 }
28751 + pax_open_userland();
28752 stac();
28753 - isum = csum_partial_copy_generic((__force const void *)src,
28754 + isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
28755 dst, len, isum, errp, NULL);
28756 clac();
28757 + pax_close_userland();
28758 if (unlikely(*errp))
28759 goto out_err;
28760
28761 @@ -110,10 +112,12 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
28762 }
28763
28764 *errp = 0;
28765 + pax_open_userland();
28766 stac();
28767 - ret = csum_partial_copy_generic(src, (void __force *)dst,
28768 + ret = csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
28769 len, isum, NULL, errp);
28770 clac();
28771 + pax_close_userland();
28772 return ret;
28773 }
28774 EXPORT_SYMBOL(csum_partial_copy_to_user);
28775 diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
28776 index a451235..1daa956 100644
28777 --- a/arch/x86/lib/getuser.S
28778 +++ b/arch/x86/lib/getuser.S
28779 @@ -33,17 +33,40 @@
28780 #include <asm/thread_info.h>
28781 #include <asm/asm.h>
28782 #include <asm/smap.h>
28783 +#include <asm/segment.h>
28784 +#include <asm/pgtable.h>
28785 +#include <asm/alternative-asm.h>
28786 +
28787 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
28788 +#define __copyuser_seg gs;
28789 +#else
28790 +#define __copyuser_seg
28791 +#endif
28792
28793 .text
28794 ENTRY(__get_user_1)
28795 CFI_STARTPROC
28796 +
28797 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28798 GET_THREAD_INFO(%_ASM_DX)
28799 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28800 jae bad_get_user
28801 ASM_STAC
28802 -1: movzbl (%_ASM_AX),%edx
28803 +
28804 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28805 + mov pax_user_shadow_base,%_ASM_DX
28806 + cmp %_ASM_DX,%_ASM_AX
28807 + jae 1234f
28808 + add %_ASM_DX,%_ASM_AX
28809 +1234:
28810 +#endif
28811 +
28812 +#endif
28813 +
28814 +1: __copyuser_seg movzbl (%_ASM_AX),%edx
28815 xor %eax,%eax
28816 ASM_CLAC
28817 + pax_force_retaddr
28818 ret
28819 CFI_ENDPROC
28820 ENDPROC(__get_user_1)
28821 @@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
28822 ENTRY(__get_user_2)
28823 CFI_STARTPROC
28824 add $1,%_ASM_AX
28825 +
28826 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28827 jc bad_get_user
28828 GET_THREAD_INFO(%_ASM_DX)
28829 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28830 jae bad_get_user
28831 ASM_STAC
28832 -2: movzwl -1(%_ASM_AX),%edx
28833 +
28834 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28835 + mov pax_user_shadow_base,%_ASM_DX
28836 + cmp %_ASM_DX,%_ASM_AX
28837 + jae 1234f
28838 + add %_ASM_DX,%_ASM_AX
28839 +1234:
28840 +#endif
28841 +
28842 +#endif
28843 +
28844 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
28845 xor %eax,%eax
28846 ASM_CLAC
28847 + pax_force_retaddr
28848 ret
28849 CFI_ENDPROC
28850 ENDPROC(__get_user_2)
28851 @@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
28852 ENTRY(__get_user_4)
28853 CFI_STARTPROC
28854 add $3,%_ASM_AX
28855 +
28856 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
28857 jc bad_get_user
28858 GET_THREAD_INFO(%_ASM_DX)
28859 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28860 jae bad_get_user
28861 ASM_STAC
28862 -3: movl -3(%_ASM_AX),%edx
28863 +
28864 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28865 + mov pax_user_shadow_base,%_ASM_DX
28866 + cmp %_ASM_DX,%_ASM_AX
28867 + jae 1234f
28868 + add %_ASM_DX,%_ASM_AX
28869 +1234:
28870 +#endif
28871 +
28872 +#endif
28873 +
28874 +3: __copyuser_seg movl -3(%_ASM_AX),%edx
28875 xor %eax,%eax
28876 ASM_CLAC
28877 + pax_force_retaddr
28878 ret
28879 CFI_ENDPROC
28880 ENDPROC(__get_user_4)
28881 @@ -86,10 +137,20 @@ ENTRY(__get_user_8)
28882 GET_THREAD_INFO(%_ASM_DX)
28883 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28884 jae bad_get_user
28885 +
28886 +#ifdef CONFIG_PAX_MEMORY_UDEREF
28887 + mov pax_user_shadow_base,%_ASM_DX
28888 + cmp %_ASM_DX,%_ASM_AX
28889 + jae 1234f
28890 + add %_ASM_DX,%_ASM_AX
28891 +1234:
28892 +#endif
28893 +
28894 ASM_STAC
28895 4: movq -7(%_ASM_AX),%rdx
28896 xor %eax,%eax
28897 ASM_CLAC
28898 + pax_force_retaddr
28899 ret
28900 #else
28901 add $7,%_ASM_AX
28902 @@ -98,10 +159,11 @@ ENTRY(__get_user_8)
28903 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
28904 jae bad_get_user_8
28905 ASM_STAC
28906 -4: movl -7(%_ASM_AX),%edx
28907 -5: movl -3(%_ASM_AX),%ecx
28908 +4: __copyuser_seg movl -7(%_ASM_AX),%edx
28909 +5: __copyuser_seg movl -3(%_ASM_AX),%ecx
28910 xor %eax,%eax
28911 ASM_CLAC
28912 + pax_force_retaddr
28913 ret
28914 #endif
28915 CFI_ENDPROC
28916 @@ -113,6 +175,7 @@ bad_get_user:
28917 xor %edx,%edx
28918 mov $(-EFAULT),%_ASM_AX
28919 ASM_CLAC
28920 + pax_force_retaddr
28921 ret
28922 CFI_ENDPROC
28923 END(bad_get_user)
28924 @@ -124,6 +187,7 @@ bad_get_user_8:
28925 xor %ecx,%ecx
28926 mov $(-EFAULT),%_ASM_AX
28927 ASM_CLAC
28928 + pax_force_retaddr
28929 ret
28930 CFI_ENDPROC
28931 END(bad_get_user_8)
28932 diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
28933 index 54fcffe..7be149e 100644
28934 --- a/arch/x86/lib/insn.c
28935 +++ b/arch/x86/lib/insn.c
28936 @@ -20,8 +20,10 @@
28937
28938 #ifdef __KERNEL__
28939 #include <linux/string.h>
28940 +#include <asm/pgtable_types.h>
28941 #else
28942 #include <string.h>
28943 +#define ktla_ktva(addr) addr
28944 #endif
28945 #include <asm/inat.h>
28946 #include <asm/insn.h>
28947 @@ -53,8 +55,8 @@
28948 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
28949 {
28950 memset(insn, 0, sizeof(*insn));
28951 - insn->kaddr = kaddr;
28952 - insn->next_byte = kaddr;
28953 + insn->kaddr = ktla_ktva(kaddr);
28954 + insn->next_byte = ktla_ktva(kaddr);
28955 insn->x86_64 = x86_64 ? 1 : 0;
28956 insn->opnd_bytes = 4;
28957 if (x86_64)
28958 diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
28959 index 05a95e7..326f2fa 100644
28960 --- a/arch/x86/lib/iomap_copy_64.S
28961 +++ b/arch/x86/lib/iomap_copy_64.S
28962 @@ -17,6 +17,7 @@
28963
28964 #include <linux/linkage.h>
28965 #include <asm/dwarf2.h>
28966 +#include <asm/alternative-asm.h>
28967
28968 /*
28969 * override generic version in lib/iomap_copy.c
28970 @@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
28971 CFI_STARTPROC
28972 movl %edx,%ecx
28973 rep movsd
28974 + pax_force_retaddr
28975 ret
28976 CFI_ENDPROC
28977 ENDPROC(__iowrite32_copy)
28978 diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
28979 index 56313a3..0db417e 100644
28980 --- a/arch/x86/lib/memcpy_64.S
28981 +++ b/arch/x86/lib/memcpy_64.S
28982 @@ -24,7 +24,7 @@
28983 * This gets patched over the unrolled variant (below) via the
28984 * alternative instructions framework:
28985 */
28986 - .section .altinstr_replacement, "ax", @progbits
28987 + .section .altinstr_replacement, "a", @progbits
28988 .Lmemcpy_c:
28989 movq %rdi, %rax
28990 movq %rdx, %rcx
28991 @@ -33,6 +33,7 @@
28992 rep movsq
28993 movl %edx, %ecx
28994 rep movsb
28995 + pax_force_retaddr
28996 ret
28997 .Lmemcpy_e:
28998 .previous
28999 @@ -44,11 +45,12 @@
29000 * This gets patched over the unrolled variant (below) via the
29001 * alternative instructions framework:
29002 */
29003 - .section .altinstr_replacement, "ax", @progbits
29004 + .section .altinstr_replacement, "a", @progbits
29005 .Lmemcpy_c_e:
29006 movq %rdi, %rax
29007 movq %rdx, %rcx
29008 rep movsb
29009 + pax_force_retaddr
29010 ret
29011 .Lmemcpy_e_e:
29012 .previous
29013 @@ -136,6 +138,7 @@ ENTRY(memcpy)
29014 movq %r9, 1*8(%rdi)
29015 movq %r10, -2*8(%rdi, %rdx)
29016 movq %r11, -1*8(%rdi, %rdx)
29017 + pax_force_retaddr
29018 retq
29019 .p2align 4
29020 .Lless_16bytes:
29021 @@ -148,6 +151,7 @@ ENTRY(memcpy)
29022 movq -1*8(%rsi, %rdx), %r9
29023 movq %r8, 0*8(%rdi)
29024 movq %r9, -1*8(%rdi, %rdx)
29025 + pax_force_retaddr
29026 retq
29027 .p2align 4
29028 .Lless_8bytes:
29029 @@ -161,6 +165,7 @@ ENTRY(memcpy)
29030 movl -4(%rsi, %rdx), %r8d
29031 movl %ecx, (%rdi)
29032 movl %r8d, -4(%rdi, %rdx)
29033 + pax_force_retaddr
29034 retq
29035 .p2align 4
29036 .Lless_3bytes:
29037 @@ -179,6 +184,7 @@ ENTRY(memcpy)
29038 movb %cl, (%rdi)
29039
29040 .Lend:
29041 + pax_force_retaddr
29042 retq
29043 CFI_ENDPROC
29044 ENDPROC(memcpy)
29045 diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
29046 index 65268a6..dd1de11 100644
29047 --- a/arch/x86/lib/memmove_64.S
29048 +++ b/arch/x86/lib/memmove_64.S
29049 @@ -202,14 +202,16 @@ ENTRY(memmove)
29050 movb (%rsi), %r11b
29051 movb %r11b, (%rdi)
29052 13:
29053 + pax_force_retaddr
29054 retq
29055 CFI_ENDPROC
29056
29057 - .section .altinstr_replacement,"ax"
29058 + .section .altinstr_replacement,"a"
29059 .Lmemmove_begin_forward_efs:
29060 /* Forward moving data. */
29061 movq %rdx, %rcx
29062 rep movsb
29063 + pax_force_retaddr
29064 retq
29065 .Lmemmove_end_forward_efs:
29066 .previous
29067 diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
29068 index 2dcb380..2eb79fe 100644
29069 --- a/arch/x86/lib/memset_64.S
29070 +++ b/arch/x86/lib/memset_64.S
29071 @@ -16,7 +16,7 @@
29072 *
29073 * rax original destination
29074 */
29075 - .section .altinstr_replacement, "ax", @progbits
29076 + .section .altinstr_replacement, "a", @progbits
29077 .Lmemset_c:
29078 movq %rdi,%r9
29079 movq %rdx,%rcx
29080 @@ -30,6 +30,7 @@
29081 movl %edx,%ecx
29082 rep stosb
29083 movq %r9,%rax
29084 + pax_force_retaddr
29085 ret
29086 .Lmemset_e:
29087 .previous
29088 @@ -45,13 +46,14 @@
29089 *
29090 * rax original destination
29091 */
29092 - .section .altinstr_replacement, "ax", @progbits
29093 + .section .altinstr_replacement, "a", @progbits
29094 .Lmemset_c_e:
29095 movq %rdi,%r9
29096 movb %sil,%al
29097 movq %rdx,%rcx
29098 rep stosb
29099 movq %r9,%rax
29100 + pax_force_retaddr
29101 ret
29102 .Lmemset_e_e:
29103 .previous
29104 @@ -118,6 +120,7 @@ ENTRY(__memset)
29105
29106 .Lende:
29107 movq %r10,%rax
29108 + pax_force_retaddr
29109 ret
29110
29111 CFI_RESTORE_STATE
29112 diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
29113 index c9f2d9b..e7fd2c0 100644
29114 --- a/arch/x86/lib/mmx_32.c
29115 +++ b/arch/x86/lib/mmx_32.c
29116 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
29117 {
29118 void *p;
29119 int i;
29120 + unsigned long cr0;
29121
29122 if (unlikely(in_interrupt()))
29123 return __memcpy(to, from, len);
29124 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
29125 kernel_fpu_begin();
29126
29127 __asm__ __volatile__ (
29128 - "1: prefetch (%0)\n" /* This set is 28 bytes */
29129 - " prefetch 64(%0)\n"
29130 - " prefetch 128(%0)\n"
29131 - " prefetch 192(%0)\n"
29132 - " prefetch 256(%0)\n"
29133 + "1: prefetch (%1)\n" /* This set is 28 bytes */
29134 + " prefetch 64(%1)\n"
29135 + " prefetch 128(%1)\n"
29136 + " prefetch 192(%1)\n"
29137 + " prefetch 256(%1)\n"
29138 "2: \n"
29139 ".section .fixup, \"ax\"\n"
29140 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29141 + "3: \n"
29142 +
29143 +#ifdef CONFIG_PAX_KERNEXEC
29144 + " movl %%cr0, %0\n"
29145 + " movl %0, %%eax\n"
29146 + " andl $0xFFFEFFFF, %%eax\n"
29147 + " movl %%eax, %%cr0\n"
29148 +#endif
29149 +
29150 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29151 +
29152 +#ifdef CONFIG_PAX_KERNEXEC
29153 + " movl %0, %%cr0\n"
29154 +#endif
29155 +
29156 " jmp 2b\n"
29157 ".previous\n"
29158 _ASM_EXTABLE(1b, 3b)
29159 - : : "r" (from));
29160 + : "=&r" (cr0) : "r" (from) : "ax");
29161
29162 for ( ; i > 5; i--) {
29163 __asm__ __volatile__ (
29164 - "1: prefetch 320(%0)\n"
29165 - "2: movq (%0), %%mm0\n"
29166 - " movq 8(%0), %%mm1\n"
29167 - " movq 16(%0), %%mm2\n"
29168 - " movq 24(%0), %%mm3\n"
29169 - " movq %%mm0, (%1)\n"
29170 - " movq %%mm1, 8(%1)\n"
29171 - " movq %%mm2, 16(%1)\n"
29172 - " movq %%mm3, 24(%1)\n"
29173 - " movq 32(%0), %%mm0\n"
29174 - " movq 40(%0), %%mm1\n"
29175 - " movq 48(%0), %%mm2\n"
29176 - " movq 56(%0), %%mm3\n"
29177 - " movq %%mm0, 32(%1)\n"
29178 - " movq %%mm1, 40(%1)\n"
29179 - " movq %%mm2, 48(%1)\n"
29180 - " movq %%mm3, 56(%1)\n"
29181 + "1: prefetch 320(%1)\n"
29182 + "2: movq (%1), %%mm0\n"
29183 + " movq 8(%1), %%mm1\n"
29184 + " movq 16(%1), %%mm2\n"
29185 + " movq 24(%1), %%mm3\n"
29186 + " movq %%mm0, (%2)\n"
29187 + " movq %%mm1, 8(%2)\n"
29188 + " movq %%mm2, 16(%2)\n"
29189 + " movq %%mm3, 24(%2)\n"
29190 + " movq 32(%1), %%mm0\n"
29191 + " movq 40(%1), %%mm1\n"
29192 + " movq 48(%1), %%mm2\n"
29193 + " movq 56(%1), %%mm3\n"
29194 + " movq %%mm0, 32(%2)\n"
29195 + " movq %%mm1, 40(%2)\n"
29196 + " movq %%mm2, 48(%2)\n"
29197 + " movq %%mm3, 56(%2)\n"
29198 ".section .fixup, \"ax\"\n"
29199 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29200 + "3:\n"
29201 +
29202 +#ifdef CONFIG_PAX_KERNEXEC
29203 + " movl %%cr0, %0\n"
29204 + " movl %0, %%eax\n"
29205 + " andl $0xFFFEFFFF, %%eax\n"
29206 + " movl %%eax, %%cr0\n"
29207 +#endif
29208 +
29209 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29210 +
29211 +#ifdef CONFIG_PAX_KERNEXEC
29212 + " movl %0, %%cr0\n"
29213 +#endif
29214 +
29215 " jmp 2b\n"
29216 ".previous\n"
29217 _ASM_EXTABLE(1b, 3b)
29218 - : : "r" (from), "r" (to) : "memory");
29219 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
29220
29221 from += 64;
29222 to += 64;
29223 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
29224 static void fast_copy_page(void *to, void *from)
29225 {
29226 int i;
29227 + unsigned long cr0;
29228
29229 kernel_fpu_begin();
29230
29231 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
29232 * but that is for later. -AV
29233 */
29234 __asm__ __volatile__(
29235 - "1: prefetch (%0)\n"
29236 - " prefetch 64(%0)\n"
29237 - " prefetch 128(%0)\n"
29238 - " prefetch 192(%0)\n"
29239 - " prefetch 256(%0)\n"
29240 + "1: prefetch (%1)\n"
29241 + " prefetch 64(%1)\n"
29242 + " prefetch 128(%1)\n"
29243 + " prefetch 192(%1)\n"
29244 + " prefetch 256(%1)\n"
29245 "2: \n"
29246 ".section .fixup, \"ax\"\n"
29247 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29248 + "3: \n"
29249 +
29250 +#ifdef CONFIG_PAX_KERNEXEC
29251 + " movl %%cr0, %0\n"
29252 + " movl %0, %%eax\n"
29253 + " andl $0xFFFEFFFF, %%eax\n"
29254 + " movl %%eax, %%cr0\n"
29255 +#endif
29256 +
29257 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29258 +
29259 +#ifdef CONFIG_PAX_KERNEXEC
29260 + " movl %0, %%cr0\n"
29261 +#endif
29262 +
29263 " jmp 2b\n"
29264 ".previous\n"
29265 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
29266 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
29267
29268 for (i = 0; i < (4096-320)/64; i++) {
29269 __asm__ __volatile__ (
29270 - "1: prefetch 320(%0)\n"
29271 - "2: movq (%0), %%mm0\n"
29272 - " movntq %%mm0, (%1)\n"
29273 - " movq 8(%0), %%mm1\n"
29274 - " movntq %%mm1, 8(%1)\n"
29275 - " movq 16(%0), %%mm2\n"
29276 - " movntq %%mm2, 16(%1)\n"
29277 - " movq 24(%0), %%mm3\n"
29278 - " movntq %%mm3, 24(%1)\n"
29279 - " movq 32(%0), %%mm4\n"
29280 - " movntq %%mm4, 32(%1)\n"
29281 - " movq 40(%0), %%mm5\n"
29282 - " movntq %%mm5, 40(%1)\n"
29283 - " movq 48(%0), %%mm6\n"
29284 - " movntq %%mm6, 48(%1)\n"
29285 - " movq 56(%0), %%mm7\n"
29286 - " movntq %%mm7, 56(%1)\n"
29287 + "1: prefetch 320(%1)\n"
29288 + "2: movq (%1), %%mm0\n"
29289 + " movntq %%mm0, (%2)\n"
29290 + " movq 8(%1), %%mm1\n"
29291 + " movntq %%mm1, 8(%2)\n"
29292 + " movq 16(%1), %%mm2\n"
29293 + " movntq %%mm2, 16(%2)\n"
29294 + " movq 24(%1), %%mm3\n"
29295 + " movntq %%mm3, 24(%2)\n"
29296 + " movq 32(%1), %%mm4\n"
29297 + " movntq %%mm4, 32(%2)\n"
29298 + " movq 40(%1), %%mm5\n"
29299 + " movntq %%mm5, 40(%2)\n"
29300 + " movq 48(%1), %%mm6\n"
29301 + " movntq %%mm6, 48(%2)\n"
29302 + " movq 56(%1), %%mm7\n"
29303 + " movntq %%mm7, 56(%2)\n"
29304 ".section .fixup, \"ax\"\n"
29305 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29306 + "3:\n"
29307 +
29308 +#ifdef CONFIG_PAX_KERNEXEC
29309 + " movl %%cr0, %0\n"
29310 + " movl %0, %%eax\n"
29311 + " andl $0xFFFEFFFF, %%eax\n"
29312 + " movl %%eax, %%cr0\n"
29313 +#endif
29314 +
29315 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29316 +
29317 +#ifdef CONFIG_PAX_KERNEXEC
29318 + " movl %0, %%cr0\n"
29319 +#endif
29320 +
29321 " jmp 2b\n"
29322 ".previous\n"
29323 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
29324 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
29325
29326 from += 64;
29327 to += 64;
29328 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
29329 static void fast_copy_page(void *to, void *from)
29330 {
29331 int i;
29332 + unsigned long cr0;
29333
29334 kernel_fpu_begin();
29335
29336 __asm__ __volatile__ (
29337 - "1: prefetch (%0)\n"
29338 - " prefetch 64(%0)\n"
29339 - " prefetch 128(%0)\n"
29340 - " prefetch 192(%0)\n"
29341 - " prefetch 256(%0)\n"
29342 + "1: prefetch (%1)\n"
29343 + " prefetch 64(%1)\n"
29344 + " prefetch 128(%1)\n"
29345 + " prefetch 192(%1)\n"
29346 + " prefetch 256(%1)\n"
29347 "2: \n"
29348 ".section .fixup, \"ax\"\n"
29349 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29350 + "3: \n"
29351 +
29352 +#ifdef CONFIG_PAX_KERNEXEC
29353 + " movl %%cr0, %0\n"
29354 + " movl %0, %%eax\n"
29355 + " andl $0xFFFEFFFF, %%eax\n"
29356 + " movl %%eax, %%cr0\n"
29357 +#endif
29358 +
29359 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
29360 +
29361 +#ifdef CONFIG_PAX_KERNEXEC
29362 + " movl %0, %%cr0\n"
29363 +#endif
29364 +
29365 " jmp 2b\n"
29366 ".previous\n"
29367 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
29368 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
29369
29370 for (i = 0; i < 4096/64; i++) {
29371 __asm__ __volatile__ (
29372 - "1: prefetch 320(%0)\n"
29373 - "2: movq (%0), %%mm0\n"
29374 - " movq 8(%0), %%mm1\n"
29375 - " movq 16(%0), %%mm2\n"
29376 - " movq 24(%0), %%mm3\n"
29377 - " movq %%mm0, (%1)\n"
29378 - " movq %%mm1, 8(%1)\n"
29379 - " movq %%mm2, 16(%1)\n"
29380 - " movq %%mm3, 24(%1)\n"
29381 - " movq 32(%0), %%mm0\n"
29382 - " movq 40(%0), %%mm1\n"
29383 - " movq 48(%0), %%mm2\n"
29384 - " movq 56(%0), %%mm3\n"
29385 - " movq %%mm0, 32(%1)\n"
29386 - " movq %%mm1, 40(%1)\n"
29387 - " movq %%mm2, 48(%1)\n"
29388 - " movq %%mm3, 56(%1)\n"
29389 + "1: prefetch 320(%1)\n"
29390 + "2: movq (%1), %%mm0\n"
29391 + " movq 8(%1), %%mm1\n"
29392 + " movq 16(%1), %%mm2\n"
29393 + " movq 24(%1), %%mm3\n"
29394 + " movq %%mm0, (%2)\n"
29395 + " movq %%mm1, 8(%2)\n"
29396 + " movq %%mm2, 16(%2)\n"
29397 + " movq %%mm3, 24(%2)\n"
29398 + " movq 32(%1), %%mm0\n"
29399 + " movq 40(%1), %%mm1\n"
29400 + " movq 48(%1), %%mm2\n"
29401 + " movq 56(%1), %%mm3\n"
29402 + " movq %%mm0, 32(%2)\n"
29403 + " movq %%mm1, 40(%2)\n"
29404 + " movq %%mm2, 48(%2)\n"
29405 + " movq %%mm3, 56(%2)\n"
29406 ".section .fixup, \"ax\"\n"
29407 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29408 + "3:\n"
29409 +
29410 +#ifdef CONFIG_PAX_KERNEXEC
29411 + " movl %%cr0, %0\n"
29412 + " movl %0, %%eax\n"
29413 + " andl $0xFFFEFFFF, %%eax\n"
29414 + " movl %%eax, %%cr0\n"
29415 +#endif
29416 +
29417 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
29418 +
29419 +#ifdef CONFIG_PAX_KERNEXEC
29420 + " movl %0, %%cr0\n"
29421 +#endif
29422 +
29423 " jmp 2b\n"
29424 ".previous\n"
29425 _ASM_EXTABLE(1b, 3b)
29426 - : : "r" (from), "r" (to) : "memory");
29427 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
29428
29429 from += 64;
29430 to += 64;
29431 diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
29432 index f6d13ee..d789440 100644
29433 --- a/arch/x86/lib/msr-reg.S
29434 +++ b/arch/x86/lib/msr-reg.S
29435 @@ -3,6 +3,7 @@
29436 #include <asm/dwarf2.h>
29437 #include <asm/asm.h>
29438 #include <asm/msr.h>
29439 +#include <asm/alternative-asm.h>
29440
29441 #ifdef CONFIG_X86_64
29442 /*
29443 @@ -37,6 +38,7 @@ ENTRY(\op\()_safe_regs)
29444 movl %edi, 28(%r10)
29445 popq_cfi %rbp
29446 popq_cfi %rbx
29447 + pax_force_retaddr
29448 ret
29449 3:
29450 CFI_RESTORE_STATE
29451 diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
29452 index fc6ba17..d4d989d 100644
29453 --- a/arch/x86/lib/putuser.S
29454 +++ b/arch/x86/lib/putuser.S
29455 @@ -16,7 +16,9 @@
29456 #include <asm/errno.h>
29457 #include <asm/asm.h>
29458 #include <asm/smap.h>
29459 -
29460 +#include <asm/segment.h>
29461 +#include <asm/pgtable.h>
29462 +#include <asm/alternative-asm.h>
29463
29464 /*
29465 * __put_user_X
29466 @@ -30,57 +32,125 @@
29467 * as they get called from within inline assembly.
29468 */
29469
29470 -#define ENTER CFI_STARTPROC ; \
29471 - GET_THREAD_INFO(%_ASM_BX)
29472 -#define EXIT ASM_CLAC ; \
29473 - ret ; \
29474 +#define ENTER CFI_STARTPROC
29475 +#define EXIT ASM_CLAC ; \
29476 + pax_force_retaddr ; \
29477 + ret ; \
29478 CFI_ENDPROC
29479
29480 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29481 +#define _DEST %_ASM_CX,%_ASM_BX
29482 +#else
29483 +#define _DEST %_ASM_CX
29484 +#endif
29485 +
29486 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
29487 +#define __copyuser_seg gs;
29488 +#else
29489 +#define __copyuser_seg
29490 +#endif
29491 +
29492 .text
29493 ENTRY(__put_user_1)
29494 ENTER
29495 +
29496 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29497 + GET_THREAD_INFO(%_ASM_BX)
29498 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
29499 jae bad_put_user
29500 ASM_STAC
29501 -1: movb %al,(%_ASM_CX)
29502 +
29503 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29504 + mov pax_user_shadow_base,%_ASM_BX
29505 + cmp %_ASM_BX,%_ASM_CX
29506 + jb 1234f
29507 + xor %ebx,%ebx
29508 +1234:
29509 +#endif
29510 +
29511 +#endif
29512 +
29513 +1: __copyuser_seg movb %al,(_DEST)
29514 xor %eax,%eax
29515 EXIT
29516 ENDPROC(__put_user_1)
29517
29518 ENTRY(__put_user_2)
29519 ENTER
29520 +
29521 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29522 + GET_THREAD_INFO(%_ASM_BX)
29523 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
29524 sub $1,%_ASM_BX
29525 cmp %_ASM_BX,%_ASM_CX
29526 jae bad_put_user
29527 ASM_STAC
29528 -2: movw %ax,(%_ASM_CX)
29529 +
29530 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29531 + mov pax_user_shadow_base,%_ASM_BX
29532 + cmp %_ASM_BX,%_ASM_CX
29533 + jb 1234f
29534 + xor %ebx,%ebx
29535 +1234:
29536 +#endif
29537 +
29538 +#endif
29539 +
29540 +2: __copyuser_seg movw %ax,(_DEST)
29541 xor %eax,%eax
29542 EXIT
29543 ENDPROC(__put_user_2)
29544
29545 ENTRY(__put_user_4)
29546 ENTER
29547 +
29548 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29549 + GET_THREAD_INFO(%_ASM_BX)
29550 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
29551 sub $3,%_ASM_BX
29552 cmp %_ASM_BX,%_ASM_CX
29553 jae bad_put_user
29554 ASM_STAC
29555 -3: movl %eax,(%_ASM_CX)
29556 +
29557 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29558 + mov pax_user_shadow_base,%_ASM_BX
29559 + cmp %_ASM_BX,%_ASM_CX
29560 + jb 1234f
29561 + xor %ebx,%ebx
29562 +1234:
29563 +#endif
29564 +
29565 +#endif
29566 +
29567 +3: __copyuser_seg movl %eax,(_DEST)
29568 xor %eax,%eax
29569 EXIT
29570 ENDPROC(__put_user_4)
29571
29572 ENTRY(__put_user_8)
29573 ENTER
29574 +
29575 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
29576 + GET_THREAD_INFO(%_ASM_BX)
29577 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
29578 sub $7,%_ASM_BX
29579 cmp %_ASM_BX,%_ASM_CX
29580 jae bad_put_user
29581 ASM_STAC
29582 -4: mov %_ASM_AX,(%_ASM_CX)
29583 +
29584 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29585 + mov pax_user_shadow_base,%_ASM_BX
29586 + cmp %_ASM_BX,%_ASM_CX
29587 + jb 1234f
29588 + xor %ebx,%ebx
29589 +1234:
29590 +#endif
29591 +
29592 +#endif
29593 +
29594 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
29595 #ifdef CONFIG_X86_32
29596 -5: movl %edx,4(%_ASM_CX)
29597 +5: __copyuser_seg movl %edx,4(_DEST)
29598 #endif
29599 xor %eax,%eax
29600 EXIT
29601 diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
29602 index 1cad221..de671ee 100644
29603 --- a/arch/x86/lib/rwlock.S
29604 +++ b/arch/x86/lib/rwlock.S
29605 @@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
29606 FRAME
29607 0: LOCK_PREFIX
29608 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
29609 +
29610 +#ifdef CONFIG_PAX_REFCOUNT
29611 + jno 1234f
29612 + LOCK_PREFIX
29613 + WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
29614 + int $4
29615 +1234:
29616 + _ASM_EXTABLE(1234b, 1234b)
29617 +#endif
29618 +
29619 1: rep; nop
29620 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
29621 jne 1b
29622 LOCK_PREFIX
29623 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
29624 +
29625 +#ifdef CONFIG_PAX_REFCOUNT
29626 + jno 1234f
29627 + LOCK_PREFIX
29628 + WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
29629 + int $4
29630 +1234:
29631 + _ASM_EXTABLE(1234b, 1234b)
29632 +#endif
29633 +
29634 jnz 0b
29635 ENDFRAME
29636 + pax_force_retaddr
29637 ret
29638 CFI_ENDPROC
29639 END(__write_lock_failed)
29640 @@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
29641 FRAME
29642 0: LOCK_PREFIX
29643 READ_LOCK_SIZE(inc) (%__lock_ptr)
29644 +
29645 +#ifdef CONFIG_PAX_REFCOUNT
29646 + jno 1234f
29647 + LOCK_PREFIX
29648 + READ_LOCK_SIZE(dec) (%__lock_ptr)
29649 + int $4
29650 +1234:
29651 + _ASM_EXTABLE(1234b, 1234b)
29652 +#endif
29653 +
29654 1: rep; nop
29655 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
29656 js 1b
29657 LOCK_PREFIX
29658 READ_LOCK_SIZE(dec) (%__lock_ptr)
29659 +
29660 +#ifdef CONFIG_PAX_REFCOUNT
29661 + jno 1234f
29662 + LOCK_PREFIX
29663 + READ_LOCK_SIZE(inc) (%__lock_ptr)
29664 + int $4
29665 +1234:
29666 + _ASM_EXTABLE(1234b, 1234b)
29667 +#endif
29668 +
29669 js 0b
29670 ENDFRAME
29671 + pax_force_retaddr
29672 ret
29673 CFI_ENDPROC
29674 END(__read_lock_failed)
29675 diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
29676 index 5dff5f0..cadebf4 100644
29677 --- a/arch/x86/lib/rwsem.S
29678 +++ b/arch/x86/lib/rwsem.S
29679 @@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
29680 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
29681 CFI_RESTORE __ASM_REG(dx)
29682 restore_common_regs
29683 + pax_force_retaddr
29684 ret
29685 CFI_ENDPROC
29686 ENDPROC(call_rwsem_down_read_failed)
29687 @@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
29688 movq %rax,%rdi
29689 call rwsem_down_write_failed
29690 restore_common_regs
29691 + pax_force_retaddr
29692 ret
29693 CFI_ENDPROC
29694 ENDPROC(call_rwsem_down_write_failed)
29695 @@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
29696 movq %rax,%rdi
29697 call rwsem_wake
29698 restore_common_regs
29699 -1: ret
29700 +1: pax_force_retaddr
29701 + ret
29702 CFI_ENDPROC
29703 ENDPROC(call_rwsem_wake)
29704
29705 @@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
29706 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
29707 CFI_RESTORE __ASM_REG(dx)
29708 restore_common_regs
29709 + pax_force_retaddr
29710 ret
29711 CFI_ENDPROC
29712 ENDPROC(call_rwsem_downgrade_wake)
29713 diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
29714 index a63efd6..8149fbe 100644
29715 --- a/arch/x86/lib/thunk_64.S
29716 +++ b/arch/x86/lib/thunk_64.S
29717 @@ -8,6 +8,7 @@
29718 #include <linux/linkage.h>
29719 #include <asm/dwarf2.h>
29720 #include <asm/calling.h>
29721 +#include <asm/alternative-asm.h>
29722
29723 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
29724 .macro THUNK name, func, put_ret_addr_in_rdi=0
29725 @@ -15,11 +16,11 @@
29726 \name:
29727 CFI_STARTPROC
29728
29729 - /* this one pushes 9 elems, the next one would be %rIP */
29730 - SAVE_ARGS
29731 + /* this one pushes 15+1 elems, the next one would be %rIP */
29732 + SAVE_ARGS 8
29733
29734 .if \put_ret_addr_in_rdi
29735 - movq_cfi_restore 9*8, rdi
29736 + movq_cfi_restore RIP, rdi
29737 .endif
29738
29739 call \func
29740 @@ -38,8 +39,9 @@
29741
29742 /* SAVE_ARGS below is used only for the .cfi directives it contains. */
29743 CFI_STARTPROC
29744 - SAVE_ARGS
29745 + SAVE_ARGS 8
29746 restore:
29747 - RESTORE_ARGS
29748 + RESTORE_ARGS 1,8
29749 + pax_force_retaddr
29750 ret
29751 CFI_ENDPROC
29752 diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
29753 index 3eb18ac..4b22130 100644
29754 --- a/arch/x86/lib/usercopy_32.c
29755 +++ b/arch/x86/lib/usercopy_32.c
29756 @@ -42,11 +42,13 @@ do { \
29757 int __d0; \
29758 might_fault(); \
29759 __asm__ __volatile__( \
29760 + __COPYUSER_SET_ES \
29761 ASM_STAC "\n" \
29762 "0: rep; stosl\n" \
29763 " movl %2,%0\n" \
29764 "1: rep; stosb\n" \
29765 "2: " ASM_CLAC "\n" \
29766 + __COPYUSER_RESTORE_ES \
29767 ".section .fixup,\"ax\"\n" \
29768 "3: lea 0(%2,%0,4),%0\n" \
29769 " jmp 2b\n" \
29770 @@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
29771
29772 #ifdef CONFIG_X86_INTEL_USERCOPY
29773 static unsigned long
29774 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
29775 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
29776 {
29777 int d0, d1;
29778 __asm__ __volatile__(
29779 @@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
29780 " .align 2,0x90\n"
29781 "3: movl 0(%4), %%eax\n"
29782 "4: movl 4(%4), %%edx\n"
29783 - "5: movl %%eax, 0(%3)\n"
29784 - "6: movl %%edx, 4(%3)\n"
29785 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
29786 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
29787 "7: movl 8(%4), %%eax\n"
29788 "8: movl 12(%4),%%edx\n"
29789 - "9: movl %%eax, 8(%3)\n"
29790 - "10: movl %%edx, 12(%3)\n"
29791 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
29792 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
29793 "11: movl 16(%4), %%eax\n"
29794 "12: movl 20(%4), %%edx\n"
29795 - "13: movl %%eax, 16(%3)\n"
29796 - "14: movl %%edx, 20(%3)\n"
29797 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
29798 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
29799 "15: movl 24(%4), %%eax\n"
29800 "16: movl 28(%4), %%edx\n"
29801 - "17: movl %%eax, 24(%3)\n"
29802 - "18: movl %%edx, 28(%3)\n"
29803 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
29804 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
29805 "19: movl 32(%4), %%eax\n"
29806 "20: movl 36(%4), %%edx\n"
29807 - "21: movl %%eax, 32(%3)\n"
29808 - "22: movl %%edx, 36(%3)\n"
29809 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
29810 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
29811 "23: movl 40(%4), %%eax\n"
29812 "24: movl 44(%4), %%edx\n"
29813 - "25: movl %%eax, 40(%3)\n"
29814 - "26: movl %%edx, 44(%3)\n"
29815 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
29816 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
29817 "27: movl 48(%4), %%eax\n"
29818 "28: movl 52(%4), %%edx\n"
29819 - "29: movl %%eax, 48(%3)\n"
29820 - "30: movl %%edx, 52(%3)\n"
29821 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
29822 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
29823 "31: movl 56(%4), %%eax\n"
29824 "32: movl 60(%4), %%edx\n"
29825 - "33: movl %%eax, 56(%3)\n"
29826 - "34: movl %%edx, 60(%3)\n"
29827 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
29828 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
29829 " addl $-64, %0\n"
29830 " addl $64, %4\n"
29831 " addl $64, %3\n"
29832 @@ -149,10 +151,116 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
29833 " shrl $2, %0\n"
29834 " andl $3, %%eax\n"
29835 " cld\n"
29836 + __COPYUSER_SET_ES
29837 "99: rep; movsl\n"
29838 "36: movl %%eax, %0\n"
29839 "37: rep; movsb\n"
29840 "100:\n"
29841 + __COPYUSER_RESTORE_ES
29842 + ".section .fixup,\"ax\"\n"
29843 + "101: lea 0(%%eax,%0,4),%0\n"
29844 + " jmp 100b\n"
29845 + ".previous\n"
29846 + _ASM_EXTABLE(1b,100b)
29847 + _ASM_EXTABLE(2b,100b)
29848 + _ASM_EXTABLE(3b,100b)
29849 + _ASM_EXTABLE(4b,100b)
29850 + _ASM_EXTABLE(5b,100b)
29851 + _ASM_EXTABLE(6b,100b)
29852 + _ASM_EXTABLE(7b,100b)
29853 + _ASM_EXTABLE(8b,100b)
29854 + _ASM_EXTABLE(9b,100b)
29855 + _ASM_EXTABLE(10b,100b)
29856 + _ASM_EXTABLE(11b,100b)
29857 + _ASM_EXTABLE(12b,100b)
29858 + _ASM_EXTABLE(13b,100b)
29859 + _ASM_EXTABLE(14b,100b)
29860 + _ASM_EXTABLE(15b,100b)
29861 + _ASM_EXTABLE(16b,100b)
29862 + _ASM_EXTABLE(17b,100b)
29863 + _ASM_EXTABLE(18b,100b)
29864 + _ASM_EXTABLE(19b,100b)
29865 + _ASM_EXTABLE(20b,100b)
29866 + _ASM_EXTABLE(21b,100b)
29867 + _ASM_EXTABLE(22b,100b)
29868 + _ASM_EXTABLE(23b,100b)
29869 + _ASM_EXTABLE(24b,100b)
29870 + _ASM_EXTABLE(25b,100b)
29871 + _ASM_EXTABLE(26b,100b)
29872 + _ASM_EXTABLE(27b,100b)
29873 + _ASM_EXTABLE(28b,100b)
29874 + _ASM_EXTABLE(29b,100b)
29875 + _ASM_EXTABLE(30b,100b)
29876 + _ASM_EXTABLE(31b,100b)
29877 + _ASM_EXTABLE(32b,100b)
29878 + _ASM_EXTABLE(33b,100b)
29879 + _ASM_EXTABLE(34b,100b)
29880 + _ASM_EXTABLE(35b,100b)
29881 + _ASM_EXTABLE(36b,100b)
29882 + _ASM_EXTABLE(37b,100b)
29883 + _ASM_EXTABLE(99b,101b)
29884 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
29885 + : "1"(to), "2"(from), "0"(size)
29886 + : "eax", "edx", "memory");
29887 + return size;
29888 +}
29889 +
29890 +static unsigned long
29891 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
29892 +{
29893 + int d0, d1;
29894 + __asm__ __volatile__(
29895 + " .align 2,0x90\n"
29896 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
29897 + " cmpl $67, %0\n"
29898 + " jbe 3f\n"
29899 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
29900 + " .align 2,0x90\n"
29901 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
29902 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
29903 + "5: movl %%eax, 0(%3)\n"
29904 + "6: movl %%edx, 4(%3)\n"
29905 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
29906 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
29907 + "9: movl %%eax, 8(%3)\n"
29908 + "10: movl %%edx, 12(%3)\n"
29909 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
29910 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
29911 + "13: movl %%eax, 16(%3)\n"
29912 + "14: movl %%edx, 20(%3)\n"
29913 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
29914 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
29915 + "17: movl %%eax, 24(%3)\n"
29916 + "18: movl %%edx, 28(%3)\n"
29917 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
29918 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
29919 + "21: movl %%eax, 32(%3)\n"
29920 + "22: movl %%edx, 36(%3)\n"
29921 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
29922 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
29923 + "25: movl %%eax, 40(%3)\n"
29924 + "26: movl %%edx, 44(%3)\n"
29925 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
29926 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
29927 + "29: movl %%eax, 48(%3)\n"
29928 + "30: movl %%edx, 52(%3)\n"
29929 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
29930 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
29931 + "33: movl %%eax, 56(%3)\n"
29932 + "34: movl %%edx, 60(%3)\n"
29933 + " addl $-64, %0\n"
29934 + " addl $64, %4\n"
29935 + " addl $64, %3\n"
29936 + " cmpl $63, %0\n"
29937 + " ja 1b\n"
29938 + "35: movl %0, %%eax\n"
29939 + " shrl $2, %0\n"
29940 + " andl $3, %%eax\n"
29941 + " cld\n"
29942 + "99: rep; "__copyuser_seg" movsl\n"
29943 + "36: movl %%eax, %0\n"
29944 + "37: rep; "__copyuser_seg" movsb\n"
29945 + "100:\n"
29946 ".section .fixup,\"ax\"\n"
29947 "101: lea 0(%%eax,%0,4),%0\n"
29948 " jmp 100b\n"
29949 @@ -207,41 +315,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
29950 int d0, d1;
29951 __asm__ __volatile__(
29952 " .align 2,0x90\n"
29953 - "0: movl 32(%4), %%eax\n"
29954 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
29955 " cmpl $67, %0\n"
29956 " jbe 2f\n"
29957 - "1: movl 64(%4), %%eax\n"
29958 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
29959 " .align 2,0x90\n"
29960 - "2: movl 0(%4), %%eax\n"
29961 - "21: movl 4(%4), %%edx\n"
29962 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
29963 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
29964 " movl %%eax, 0(%3)\n"
29965 " movl %%edx, 4(%3)\n"
29966 - "3: movl 8(%4), %%eax\n"
29967 - "31: movl 12(%4),%%edx\n"
29968 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
29969 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
29970 " movl %%eax, 8(%3)\n"
29971 " movl %%edx, 12(%3)\n"
29972 - "4: movl 16(%4), %%eax\n"
29973 - "41: movl 20(%4), %%edx\n"
29974 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
29975 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
29976 " movl %%eax, 16(%3)\n"
29977 " movl %%edx, 20(%3)\n"
29978 - "10: movl 24(%4), %%eax\n"
29979 - "51: movl 28(%4), %%edx\n"
29980 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
29981 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
29982 " movl %%eax, 24(%3)\n"
29983 " movl %%edx, 28(%3)\n"
29984 - "11: movl 32(%4), %%eax\n"
29985 - "61: movl 36(%4), %%edx\n"
29986 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
29987 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
29988 " movl %%eax, 32(%3)\n"
29989 " movl %%edx, 36(%3)\n"
29990 - "12: movl 40(%4), %%eax\n"
29991 - "71: movl 44(%4), %%edx\n"
29992 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
29993 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
29994 " movl %%eax, 40(%3)\n"
29995 " movl %%edx, 44(%3)\n"
29996 - "13: movl 48(%4), %%eax\n"
29997 - "81: movl 52(%4), %%edx\n"
29998 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
29999 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30000 " movl %%eax, 48(%3)\n"
30001 " movl %%edx, 52(%3)\n"
30002 - "14: movl 56(%4), %%eax\n"
30003 - "91: movl 60(%4), %%edx\n"
30004 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30005 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30006 " movl %%eax, 56(%3)\n"
30007 " movl %%edx, 60(%3)\n"
30008 " addl $-64, %0\n"
30009 @@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
30010 " shrl $2, %0\n"
30011 " andl $3, %%eax\n"
30012 " cld\n"
30013 - "6: rep; movsl\n"
30014 + "6: rep; "__copyuser_seg" movsl\n"
30015 " movl %%eax,%0\n"
30016 - "7: rep; movsb\n"
30017 + "7: rep; "__copyuser_seg" movsb\n"
30018 "8:\n"
30019 ".section .fixup,\"ax\"\n"
30020 "9: lea 0(%%eax,%0,4),%0\n"
30021 @@ -305,41 +413,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
30022
30023 __asm__ __volatile__(
30024 " .align 2,0x90\n"
30025 - "0: movl 32(%4), %%eax\n"
30026 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
30027 " cmpl $67, %0\n"
30028 " jbe 2f\n"
30029 - "1: movl 64(%4), %%eax\n"
30030 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
30031 " .align 2,0x90\n"
30032 - "2: movl 0(%4), %%eax\n"
30033 - "21: movl 4(%4), %%edx\n"
30034 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
30035 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
30036 " movnti %%eax, 0(%3)\n"
30037 " movnti %%edx, 4(%3)\n"
30038 - "3: movl 8(%4), %%eax\n"
30039 - "31: movl 12(%4),%%edx\n"
30040 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
30041 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
30042 " movnti %%eax, 8(%3)\n"
30043 " movnti %%edx, 12(%3)\n"
30044 - "4: movl 16(%4), %%eax\n"
30045 - "41: movl 20(%4), %%edx\n"
30046 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
30047 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
30048 " movnti %%eax, 16(%3)\n"
30049 " movnti %%edx, 20(%3)\n"
30050 - "10: movl 24(%4), %%eax\n"
30051 - "51: movl 28(%4), %%edx\n"
30052 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
30053 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
30054 " movnti %%eax, 24(%3)\n"
30055 " movnti %%edx, 28(%3)\n"
30056 - "11: movl 32(%4), %%eax\n"
30057 - "61: movl 36(%4), %%edx\n"
30058 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
30059 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
30060 " movnti %%eax, 32(%3)\n"
30061 " movnti %%edx, 36(%3)\n"
30062 - "12: movl 40(%4), %%eax\n"
30063 - "71: movl 44(%4), %%edx\n"
30064 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
30065 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
30066 " movnti %%eax, 40(%3)\n"
30067 " movnti %%edx, 44(%3)\n"
30068 - "13: movl 48(%4), %%eax\n"
30069 - "81: movl 52(%4), %%edx\n"
30070 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
30071 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30072 " movnti %%eax, 48(%3)\n"
30073 " movnti %%edx, 52(%3)\n"
30074 - "14: movl 56(%4), %%eax\n"
30075 - "91: movl 60(%4), %%edx\n"
30076 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30077 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30078 " movnti %%eax, 56(%3)\n"
30079 " movnti %%edx, 60(%3)\n"
30080 " addl $-64, %0\n"
30081 @@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
30082 " shrl $2, %0\n"
30083 " andl $3, %%eax\n"
30084 " cld\n"
30085 - "6: rep; movsl\n"
30086 + "6: rep; "__copyuser_seg" movsl\n"
30087 " movl %%eax,%0\n"
30088 - "7: rep; movsb\n"
30089 + "7: rep; "__copyuser_seg" movsb\n"
30090 "8:\n"
30091 ".section .fixup,\"ax\"\n"
30092 "9: lea 0(%%eax,%0,4),%0\n"
30093 @@ -399,41 +507,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
30094
30095 __asm__ __volatile__(
30096 " .align 2,0x90\n"
30097 - "0: movl 32(%4), %%eax\n"
30098 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
30099 " cmpl $67, %0\n"
30100 " jbe 2f\n"
30101 - "1: movl 64(%4), %%eax\n"
30102 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
30103 " .align 2,0x90\n"
30104 - "2: movl 0(%4), %%eax\n"
30105 - "21: movl 4(%4), %%edx\n"
30106 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
30107 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
30108 " movnti %%eax, 0(%3)\n"
30109 " movnti %%edx, 4(%3)\n"
30110 - "3: movl 8(%4), %%eax\n"
30111 - "31: movl 12(%4),%%edx\n"
30112 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
30113 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
30114 " movnti %%eax, 8(%3)\n"
30115 " movnti %%edx, 12(%3)\n"
30116 - "4: movl 16(%4), %%eax\n"
30117 - "41: movl 20(%4), %%edx\n"
30118 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
30119 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
30120 " movnti %%eax, 16(%3)\n"
30121 " movnti %%edx, 20(%3)\n"
30122 - "10: movl 24(%4), %%eax\n"
30123 - "51: movl 28(%4), %%edx\n"
30124 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
30125 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
30126 " movnti %%eax, 24(%3)\n"
30127 " movnti %%edx, 28(%3)\n"
30128 - "11: movl 32(%4), %%eax\n"
30129 - "61: movl 36(%4), %%edx\n"
30130 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
30131 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
30132 " movnti %%eax, 32(%3)\n"
30133 " movnti %%edx, 36(%3)\n"
30134 - "12: movl 40(%4), %%eax\n"
30135 - "71: movl 44(%4), %%edx\n"
30136 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
30137 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
30138 " movnti %%eax, 40(%3)\n"
30139 " movnti %%edx, 44(%3)\n"
30140 - "13: movl 48(%4), %%eax\n"
30141 - "81: movl 52(%4), %%edx\n"
30142 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
30143 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
30144 " movnti %%eax, 48(%3)\n"
30145 " movnti %%edx, 52(%3)\n"
30146 - "14: movl 56(%4), %%eax\n"
30147 - "91: movl 60(%4), %%edx\n"
30148 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
30149 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
30150 " movnti %%eax, 56(%3)\n"
30151 " movnti %%edx, 60(%3)\n"
30152 " addl $-64, %0\n"
30153 @@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
30154 " shrl $2, %0\n"
30155 " andl $3, %%eax\n"
30156 " cld\n"
30157 - "6: rep; movsl\n"
30158 + "6: rep; "__copyuser_seg" movsl\n"
30159 " movl %%eax,%0\n"
30160 - "7: rep; movsb\n"
30161 + "7: rep; "__copyuser_seg" movsb\n"
30162 "8:\n"
30163 ".section .fixup,\"ax\"\n"
30164 "9: lea 0(%%eax,%0,4),%0\n"
30165 @@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
30166 */
30167 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
30168 unsigned long size);
30169 -unsigned long __copy_user_intel(void __user *to, const void *from,
30170 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
30171 + unsigned long size);
30172 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
30173 unsigned long size);
30174 unsigned long __copy_user_zeroing_intel_nocache(void *to,
30175 const void __user *from, unsigned long size);
30176 #endif /* CONFIG_X86_INTEL_USERCOPY */
30177
30178 /* Generic arbitrary sized copy. */
30179 -#define __copy_user(to, from, size) \
30180 +#define __copy_user(to, from, size, prefix, set, restore) \
30181 do { \
30182 int __d0, __d1, __d2; \
30183 __asm__ __volatile__( \
30184 + set \
30185 " cmp $7,%0\n" \
30186 " jbe 1f\n" \
30187 " movl %1,%0\n" \
30188 " negl %0\n" \
30189 " andl $7,%0\n" \
30190 " subl %0,%3\n" \
30191 - "4: rep; movsb\n" \
30192 + "4: rep; "prefix"movsb\n" \
30193 " movl %3,%0\n" \
30194 " shrl $2,%0\n" \
30195 " andl $3,%3\n" \
30196 " .align 2,0x90\n" \
30197 - "0: rep; movsl\n" \
30198 + "0: rep; "prefix"movsl\n" \
30199 " movl %3,%0\n" \
30200 - "1: rep; movsb\n" \
30201 + "1: rep; "prefix"movsb\n" \
30202 "2:\n" \
30203 + restore \
30204 ".section .fixup,\"ax\"\n" \
30205 "5: addl %3,%0\n" \
30206 " jmp 2b\n" \
30207 @@ -538,14 +650,14 @@ do { \
30208 " negl %0\n" \
30209 " andl $7,%0\n" \
30210 " subl %0,%3\n" \
30211 - "4: rep; movsb\n" \
30212 + "4: rep; "__copyuser_seg"movsb\n" \
30213 " movl %3,%0\n" \
30214 " shrl $2,%0\n" \
30215 " andl $3,%3\n" \
30216 " .align 2,0x90\n" \
30217 - "0: rep; movsl\n" \
30218 + "0: rep; "__copyuser_seg"movsl\n" \
30219 " movl %3,%0\n" \
30220 - "1: rep; movsb\n" \
30221 + "1: rep; "__copyuser_seg"movsb\n" \
30222 "2:\n" \
30223 ".section .fixup,\"ax\"\n" \
30224 "5: addl %3,%0\n" \
30225 @@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
30226 {
30227 stac();
30228 if (movsl_is_ok(to, from, n))
30229 - __copy_user(to, from, n);
30230 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
30231 else
30232 - n = __copy_user_intel(to, from, n);
30233 + n = __generic_copy_to_user_intel(to, from, n);
30234 clac();
30235 return n;
30236 }
30237 @@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
30238 {
30239 stac();
30240 if (movsl_is_ok(to, from, n))
30241 - __copy_user(to, from, n);
30242 + __copy_user(to, from, n, __copyuser_seg, "", "");
30243 else
30244 - n = __copy_user_intel((void __user *)to,
30245 - (const void *)from, n);
30246 + n = __generic_copy_from_user_intel(to, from, n);
30247 clac();
30248 return n;
30249 }
30250 @@ -632,60 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
30251 if (n > 64 && cpu_has_xmm2)
30252 n = __copy_user_intel_nocache(to, from, n);
30253 else
30254 - __copy_user(to, from, n);
30255 + __copy_user(to, from, n, __copyuser_seg, "", "");
30256 #else
30257 - __copy_user(to, from, n);
30258 + __copy_user(to, from, n, __copyuser_seg, "", "");
30259 #endif
30260 clac();
30261 return n;
30262 }
30263 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
30264
30265 -/**
30266 - * copy_to_user: - Copy a block of data into user space.
30267 - * @to: Destination address, in user space.
30268 - * @from: Source address, in kernel space.
30269 - * @n: Number of bytes to copy.
30270 - *
30271 - * Context: User context only. This function may sleep.
30272 - *
30273 - * Copy data from kernel space to user space.
30274 - *
30275 - * Returns number of bytes that could not be copied.
30276 - * On success, this will be zero.
30277 - */
30278 -unsigned long
30279 -copy_to_user(void __user *to, const void *from, unsigned long n)
30280 +#ifdef CONFIG_PAX_MEMORY_UDEREF
30281 +void __set_fs(mm_segment_t x)
30282 {
30283 - if (access_ok(VERIFY_WRITE, to, n))
30284 - n = __copy_to_user(to, from, n);
30285 - return n;
30286 + switch (x.seg) {
30287 + case 0:
30288 + loadsegment(gs, 0);
30289 + break;
30290 + case TASK_SIZE_MAX:
30291 + loadsegment(gs, __USER_DS);
30292 + break;
30293 + case -1UL:
30294 + loadsegment(gs, __KERNEL_DS);
30295 + break;
30296 + default:
30297 + BUG();
30298 + }
30299 }
30300 -EXPORT_SYMBOL(copy_to_user);
30301 +EXPORT_SYMBOL(__set_fs);
30302
30303 -/**
30304 - * copy_from_user: - Copy a block of data from user space.
30305 - * @to: Destination address, in kernel space.
30306 - * @from: Source address, in user space.
30307 - * @n: Number of bytes to copy.
30308 - *
30309 - * Context: User context only. This function may sleep.
30310 - *
30311 - * Copy data from user space to kernel space.
30312 - *
30313 - * Returns number of bytes that could not be copied.
30314 - * On success, this will be zero.
30315 - *
30316 - * If some data could not be copied, this function will pad the copied
30317 - * data to the requested size using zero bytes.
30318 - */
30319 -unsigned long
30320 -_copy_from_user(void *to, const void __user *from, unsigned long n)
30321 +void set_fs(mm_segment_t x)
30322 {
30323 - if (access_ok(VERIFY_READ, from, n))
30324 - n = __copy_from_user(to, from, n);
30325 - else
30326 - memset(to, 0, n);
30327 - return n;
30328 + current_thread_info()->addr_limit = x;
30329 + __set_fs(x);
30330 }
30331 -EXPORT_SYMBOL(_copy_from_user);
30332 +EXPORT_SYMBOL(set_fs);
30333 +#endif
30334 diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
30335 index c905e89..01ab928 100644
30336 --- a/arch/x86/lib/usercopy_64.c
30337 +++ b/arch/x86/lib/usercopy_64.c
30338 @@ -18,6 +18,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
30339 might_fault();
30340 /* no memory constraint because it doesn't change any memory gcc knows
30341 about */
30342 + pax_open_userland();
30343 stac();
30344 asm volatile(
30345 " testq %[size8],%[size8]\n"
30346 @@ -39,9 +40,10 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
30347 _ASM_EXTABLE(0b,3b)
30348 _ASM_EXTABLE(1b,2b)
30349 : [size8] "=&c"(size), [dst] "=&D" (__d0)
30350 - : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
30351 + : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
30352 [zero] "r" (0UL), [eight] "r" (8UL));
30353 clac();
30354 + pax_close_userland();
30355 return size;
30356 }
30357 EXPORT_SYMBOL(__clear_user);
30358 @@ -54,12 +56,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
30359 }
30360 EXPORT_SYMBOL(clear_user);
30361
30362 -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
30363 +unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
30364 {
30365 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
30366 - return copy_user_generic((__force void *)to, (__force void *)from, len);
30367 - }
30368 - return len;
30369 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
30370 + return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
30371 + return len;
30372 }
30373 EXPORT_SYMBOL(copy_in_user);
30374
30375 @@ -69,11 +70,13 @@ EXPORT_SYMBOL(copy_in_user);
30376 * it is not necessary to optimize tail handling.
30377 */
30378 __visible unsigned long
30379 -copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
30380 +copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
30381 {
30382 char c;
30383 unsigned zero_len;
30384
30385 + clac();
30386 + pax_close_userland();
30387 for (; len; --len, to++) {
30388 if (__get_user_nocheck(c, from++, sizeof(char)))
30389 break;
30390 @@ -84,6 +87,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
30391 for (c = 0, zero_len = len; zerorest && zero_len; --zero_len)
30392 if (__put_user_nocheck(c, to++, sizeof(char)))
30393 break;
30394 - clac();
30395 return len;
30396 }
30397 diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
30398 index 23d8e5f..9ccc13a 100644
30399 --- a/arch/x86/mm/Makefile
30400 +++ b/arch/x86/mm/Makefile
30401 @@ -28,3 +28,7 @@ obj-$(CONFIG_ACPI_NUMA) += srat.o
30402 obj-$(CONFIG_NUMA_EMU) += numa_emulation.o
30403
30404 obj-$(CONFIG_MEMTEST) += memtest.o
30405 +
30406 +quote:="
30407 +obj-$(CONFIG_X86_64) += uderef_64.o
30408 +CFLAGS_uderef_64.o := $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
30409 diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
30410 index 903ec1e..c4166b2 100644
30411 --- a/arch/x86/mm/extable.c
30412 +++ b/arch/x86/mm/extable.c
30413 @@ -6,12 +6,24 @@
30414 static inline unsigned long
30415 ex_insn_addr(const struct exception_table_entry *x)
30416 {
30417 - return (unsigned long)&x->insn + x->insn;
30418 + unsigned long reloc = 0;
30419 +
30420 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30421 + reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
30422 +#endif
30423 +
30424 + return (unsigned long)&x->insn + x->insn + reloc;
30425 }
30426 static inline unsigned long
30427 ex_fixup_addr(const struct exception_table_entry *x)
30428 {
30429 - return (unsigned long)&x->fixup + x->fixup;
30430 + unsigned long reloc = 0;
30431 +
30432 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30433 + reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
30434 +#endif
30435 +
30436 + return (unsigned long)&x->fixup + x->fixup + reloc;
30437 }
30438
30439 int fixup_exception(struct pt_regs *regs)
30440 @@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
30441 unsigned long new_ip;
30442
30443 #ifdef CONFIG_PNPBIOS
30444 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
30445 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
30446 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
30447 extern u32 pnp_bios_is_utter_crap;
30448 pnp_bios_is_utter_crap = 1;
30449 @@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
30450 i += 4;
30451 p->fixup -= i;
30452 i += 4;
30453 +
30454 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30455 + BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
30456 + p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
30457 + p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
30458 +#endif
30459 +
30460 }
30461 }
30462
30463 diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
30464 index 3aaeffc..42ea9fb 100644
30465 --- a/arch/x86/mm/fault.c
30466 +++ b/arch/x86/mm/fault.c
30467 @@ -14,11 +14,18 @@
30468 #include <linux/hugetlb.h> /* hstate_index_to_shift */
30469 #include <linux/prefetch.h> /* prefetchw */
30470 #include <linux/context_tracking.h> /* exception_enter(), ... */
30471 +#include <linux/unistd.h>
30472 +#include <linux/compiler.h>
30473
30474 #include <asm/traps.h> /* dotraplinkage, ... */
30475 #include <asm/pgalloc.h> /* pgd_*(), ... */
30476 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
30477 #include <asm/fixmap.h> /* VSYSCALL_START */
30478 +#include <asm/tlbflush.h>
30479 +
30480 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30481 +#include <asm/stacktrace.h>
30482 +#endif
30483
30484 /*
30485 * Page fault error code bits:
30486 @@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
30487 int ret = 0;
30488
30489 /* kprobe_running() needs smp_processor_id() */
30490 - if (kprobes_built_in() && !user_mode_vm(regs)) {
30491 + if (kprobes_built_in() && !user_mode(regs)) {
30492 preempt_disable();
30493 if (kprobe_running() && kprobe_fault_handler(regs, 14))
30494 ret = 1;
30495 @@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
30496 return !instr_lo || (instr_lo>>1) == 1;
30497 case 0x00:
30498 /* Prefetch instruction is 0x0F0D or 0x0F18 */
30499 - if (probe_kernel_address(instr, opcode))
30500 + if (user_mode(regs)) {
30501 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
30502 + return 0;
30503 + } else if (probe_kernel_address(instr, opcode))
30504 return 0;
30505
30506 *prefetch = (instr_lo == 0xF) &&
30507 @@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
30508 while (instr < max_instr) {
30509 unsigned char opcode;
30510
30511 - if (probe_kernel_address(instr, opcode))
30512 + if (user_mode(regs)) {
30513 + if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
30514 + break;
30515 + } else if (probe_kernel_address(instr, opcode))
30516 break;
30517
30518 instr++;
30519 @@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
30520 force_sig_info(si_signo, &info, tsk);
30521 }
30522
30523 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30524 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
30525 +#endif
30526 +
30527 +#ifdef CONFIG_PAX_EMUTRAMP
30528 +static int pax_handle_fetch_fault(struct pt_regs *regs);
30529 +#endif
30530 +
30531 +#ifdef CONFIG_PAX_PAGEEXEC
30532 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
30533 +{
30534 + pgd_t *pgd;
30535 + pud_t *pud;
30536 + pmd_t *pmd;
30537 +
30538 + pgd = pgd_offset(mm, address);
30539 + if (!pgd_present(*pgd))
30540 + return NULL;
30541 + pud = pud_offset(pgd, address);
30542 + if (!pud_present(*pud))
30543 + return NULL;
30544 + pmd = pmd_offset(pud, address);
30545 + if (!pmd_present(*pmd))
30546 + return NULL;
30547 + return pmd;
30548 +}
30549 +#endif
30550 +
30551 DEFINE_SPINLOCK(pgd_lock);
30552 LIST_HEAD(pgd_list);
30553
30554 @@ -232,10 +273,27 @@ void vmalloc_sync_all(void)
30555 for (address = VMALLOC_START & PMD_MASK;
30556 address >= TASK_SIZE && address < FIXADDR_TOP;
30557 address += PMD_SIZE) {
30558 +
30559 +#ifdef CONFIG_PAX_PER_CPU_PGD
30560 + unsigned long cpu;
30561 +#else
30562 struct page *page;
30563 +#endif
30564
30565 spin_lock(&pgd_lock);
30566 +
30567 +#ifdef CONFIG_PAX_PER_CPU_PGD
30568 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
30569 + pgd_t *pgd = get_cpu_pgd(cpu, user);
30570 + pmd_t *ret;
30571 +
30572 + ret = vmalloc_sync_one(pgd, address);
30573 + if (!ret)
30574 + break;
30575 + pgd = get_cpu_pgd(cpu, kernel);
30576 +#else
30577 list_for_each_entry(page, &pgd_list, lru) {
30578 + pgd_t *pgd;
30579 spinlock_t *pgt_lock;
30580 pmd_t *ret;
30581
30582 @@ -243,8 +301,14 @@ void vmalloc_sync_all(void)
30583 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
30584
30585 spin_lock(pgt_lock);
30586 - ret = vmalloc_sync_one(page_address(page), address);
30587 + pgd = page_address(page);
30588 +#endif
30589 +
30590 + ret = vmalloc_sync_one(pgd, address);
30591 +
30592 +#ifndef CONFIG_PAX_PER_CPU_PGD
30593 spin_unlock(pgt_lock);
30594 +#endif
30595
30596 if (!ret)
30597 break;
30598 @@ -278,6 +342,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
30599 * an interrupt in the middle of a task switch..
30600 */
30601 pgd_paddr = read_cr3();
30602 +
30603 +#ifdef CONFIG_PAX_PER_CPU_PGD
30604 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (pgd_paddr & __PHYSICAL_MASK));
30605 + vmalloc_sync_one(__va(pgd_paddr + PAGE_SIZE), address);
30606 +#endif
30607 +
30608 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
30609 if (!pmd_k)
30610 return -1;
30611 @@ -373,11 +443,25 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
30612 * happen within a race in page table update. In the later
30613 * case just flush:
30614 */
30615 - pgd = pgd_offset(current->active_mm, address);
30616 +
30617 pgd_ref = pgd_offset_k(address);
30618 if (pgd_none(*pgd_ref))
30619 return -1;
30620
30621 +#ifdef CONFIG_PAX_PER_CPU_PGD
30622 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id(), kernel)) != (read_cr3() & __PHYSICAL_MASK));
30623 + pgd = pgd_offset_cpu(smp_processor_id(), user, address);
30624 + if (pgd_none(*pgd)) {
30625 + set_pgd(pgd, *pgd_ref);
30626 + arch_flush_lazy_mmu_mode();
30627 + } else {
30628 + BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
30629 + }
30630 + pgd = pgd_offset_cpu(smp_processor_id(), kernel, address);
30631 +#else
30632 + pgd = pgd_offset(current->active_mm, address);
30633 +#endif
30634 +
30635 if (pgd_none(*pgd)) {
30636 set_pgd(pgd, *pgd_ref);
30637 arch_flush_lazy_mmu_mode();
30638 @@ -543,7 +627,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
30639 static int is_errata100(struct pt_regs *regs, unsigned long address)
30640 {
30641 #ifdef CONFIG_X86_64
30642 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
30643 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
30644 return 1;
30645 #endif
30646 return 0;
30647 @@ -570,7 +654,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
30648 }
30649
30650 static const char nx_warning[] = KERN_CRIT
30651 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
30652 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
30653
30654 static void
30655 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
30656 @@ -579,15 +663,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
30657 if (!oops_may_print())
30658 return;
30659
30660 - if (error_code & PF_INSTR) {
30661 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
30662 unsigned int level;
30663
30664 pte_t *pte = lookup_address(address, &level);
30665
30666 if (pte && pte_present(*pte) && !pte_exec(*pte))
30667 - printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
30668 + printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
30669 }
30670
30671 +#ifdef CONFIG_PAX_KERNEXEC
30672 + if (init_mm.start_code <= address && address < init_mm.end_code) {
30673 + if (current->signal->curr_ip)
30674 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
30675 + &current->signal->curr_ip, current->comm, task_pid_nr(current),
30676 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
30677 + else
30678 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
30679 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
30680 + }
30681 +#endif
30682 +
30683 printk(KERN_ALERT "BUG: unable to handle kernel ");
30684 if (address < PAGE_SIZE)
30685 printk(KERN_CONT "NULL pointer dereference");
30686 @@ -750,6 +846,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
30687 return;
30688 }
30689 #endif
30690 +
30691 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30692 + if (pax_is_fetch_fault(regs, error_code, address)) {
30693 +
30694 +#ifdef CONFIG_PAX_EMUTRAMP
30695 + switch (pax_handle_fetch_fault(regs)) {
30696 + case 2:
30697 + return;
30698 + }
30699 +#endif
30700 +
30701 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
30702 + do_group_exit(SIGKILL);
30703 + }
30704 +#endif
30705 +
30706 /* Kernel addresses are always protection faults: */
30707 if (address >= TASK_SIZE)
30708 error_code |= PF_PROT;
30709 @@ -835,7 +947,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
30710 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
30711 printk(KERN_ERR
30712 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
30713 - tsk->comm, tsk->pid, address);
30714 + tsk->comm, task_pid_nr(tsk), address);
30715 code = BUS_MCEERR_AR;
30716 }
30717 #endif
30718 @@ -889,6 +1001,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
30719 return 1;
30720 }
30721
30722 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
30723 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
30724 +{
30725 + pte_t *pte;
30726 + pmd_t *pmd;
30727 + spinlock_t *ptl;
30728 + unsigned char pte_mask;
30729 +
30730 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
30731 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
30732 + return 0;
30733 +
30734 + /* PaX: it's our fault, let's handle it if we can */
30735 +
30736 + /* PaX: take a look at read faults before acquiring any locks */
30737 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
30738 + /* instruction fetch attempt from a protected page in user mode */
30739 + up_read(&mm->mmap_sem);
30740 +
30741 +#ifdef CONFIG_PAX_EMUTRAMP
30742 + switch (pax_handle_fetch_fault(regs)) {
30743 + case 2:
30744 + return 1;
30745 + }
30746 +#endif
30747 +
30748 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
30749 + do_group_exit(SIGKILL);
30750 + }
30751 +
30752 + pmd = pax_get_pmd(mm, address);
30753 + if (unlikely(!pmd))
30754 + return 0;
30755 +
30756 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
30757 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
30758 + pte_unmap_unlock(pte, ptl);
30759 + return 0;
30760 + }
30761 +
30762 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
30763 + /* write attempt to a protected page in user mode */
30764 + pte_unmap_unlock(pte, ptl);
30765 + return 0;
30766 + }
30767 +
30768 +#ifdef CONFIG_SMP
30769 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
30770 +#else
30771 + if (likely(address > get_limit(regs->cs)))
30772 +#endif
30773 + {
30774 + set_pte(pte, pte_mkread(*pte));
30775 + __flush_tlb_one(address);
30776 + pte_unmap_unlock(pte, ptl);
30777 + up_read(&mm->mmap_sem);
30778 + return 1;
30779 + }
30780 +
30781 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
30782 +
30783 + /*
30784 + * PaX: fill DTLB with user rights and retry
30785 + */
30786 + __asm__ __volatile__ (
30787 + "orb %2,(%1)\n"
30788 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
30789 +/*
30790 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
30791 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
30792 + * page fault when examined during a TLB load attempt. this is true not only
30793 + * for PTEs holding a non-present entry but also present entries that will
30794 + * raise a page fault (such as those set up by PaX, or the copy-on-write
30795 + * mechanism). in effect it means that we do *not* need to flush the TLBs
30796 + * for our target pages since their PTEs are simply not in the TLBs at all.
30797 +
30798 + * the best thing in omitting it is that we gain around 15-20% speed in the
30799 + * fast path of the page fault handler and can get rid of tracing since we
30800 + * can no longer flush unintended entries.
30801 + */
30802 + "invlpg (%0)\n"
30803 +#endif
30804 + __copyuser_seg"testb $0,(%0)\n"
30805 + "xorb %3,(%1)\n"
30806 + :
30807 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
30808 + : "memory", "cc");
30809 + pte_unmap_unlock(pte, ptl);
30810 + up_read(&mm->mmap_sem);
30811 + return 1;
30812 +}
30813 +#endif
30814 +
30815 /*
30816 * Handle a spurious fault caused by a stale TLB entry.
30817 *
30818 @@ -955,6 +1160,9 @@ int show_unhandled_signals = 1;
30819 static inline int
30820 access_error(unsigned long error_code, struct vm_area_struct *vma)
30821 {
30822 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
30823 + return 1;
30824 +
30825 if (error_code & PF_WRITE) {
30826 /* write, present and write, not present: */
30827 if (unlikely(!(vma->vm_flags & VM_WRITE)))
30828 @@ -983,7 +1191,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
30829 if (error_code & PF_USER)
30830 return false;
30831
30832 - if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
30833 + if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
30834 return false;
30835
30836 return true;
30837 @@ -1010,6 +1218,22 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
30838 /* Get the faulting address: */
30839 address = read_cr2();
30840
30841 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
30842 + if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
30843 + if (!search_exception_tables(regs->ip)) {
30844 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
30845 + bad_area_nosemaphore(regs, error_code, address);
30846 + return;
30847 + }
30848 + if (address < pax_user_shadow_base) {
30849 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
30850 + printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
30851 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
30852 + } else
30853 + address -= pax_user_shadow_base;
30854 + }
30855 +#endif
30856 +
30857 /*
30858 * Detect and handle instructions that would cause a page fault for
30859 * both a tracked kernel page and a userspace page.
30860 @@ -1069,7 +1293,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
30861 * User-mode registers count as a user access even for any
30862 * potential system fault or CPU buglet:
30863 */
30864 - if (user_mode_vm(regs)) {
30865 + if (user_mode(regs)) {
30866 local_irq_enable();
30867 error_code |= PF_USER;
30868 flags |= FAULT_FLAG_USER;
30869 @@ -1135,6 +1359,11 @@ retry:
30870 might_sleep();
30871 }
30872
30873 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
30874 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
30875 + return;
30876 +#endif
30877 +
30878 vma = find_vma(mm, address);
30879 if (unlikely(!vma)) {
30880 bad_area(regs, error_code, address);
30881 @@ -1146,18 +1375,24 @@ retry:
30882 bad_area(regs, error_code, address);
30883 return;
30884 }
30885 - if (error_code & PF_USER) {
30886 - /*
30887 - * Accessing the stack below %sp is always a bug.
30888 - * The large cushion allows instructions like enter
30889 - * and pusha to work. ("enter $65535, $31" pushes
30890 - * 32 pointers and then decrements %sp by 65535.)
30891 - */
30892 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
30893 - bad_area(regs, error_code, address);
30894 - return;
30895 - }
30896 + /*
30897 + * Accessing the stack below %sp is always a bug.
30898 + * The large cushion allows instructions like enter
30899 + * and pusha to work. ("enter $65535, $31" pushes
30900 + * 32 pointers and then decrements %sp by 65535.)
30901 + */
30902 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
30903 + bad_area(regs, error_code, address);
30904 + return;
30905 }
30906 +
30907 +#ifdef CONFIG_PAX_SEGMEXEC
30908 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
30909 + bad_area(regs, error_code, address);
30910 + return;
30911 + }
30912 +#endif
30913 +
30914 if (unlikely(expand_stack(vma, address))) {
30915 bad_area(regs, error_code, address);
30916 return;
30917 @@ -1231,3 +1466,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
30918 __do_page_fault(regs, error_code);
30919 exception_exit(prev_state);
30920 }
30921 +
30922 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
30923 +static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
30924 +{
30925 + struct mm_struct *mm = current->mm;
30926 + unsigned long ip = regs->ip;
30927 +
30928 + if (v8086_mode(regs))
30929 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
30930 +
30931 +#ifdef CONFIG_PAX_PAGEEXEC
30932 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
30933 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
30934 + return true;
30935 + if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
30936 + return true;
30937 + return false;
30938 + }
30939 +#endif
30940 +
30941 +#ifdef CONFIG_PAX_SEGMEXEC
30942 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
30943 + if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
30944 + return true;
30945 + return false;
30946 + }
30947 +#endif
30948 +
30949 + return false;
30950 +}
30951 +#endif
30952 +
30953 +#ifdef CONFIG_PAX_EMUTRAMP
30954 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
30955 +{
30956 + int err;
30957 +
30958 + do { /* PaX: libffi trampoline emulation */
30959 + unsigned char mov, jmp;
30960 + unsigned int addr1, addr2;
30961 +
30962 +#ifdef CONFIG_X86_64
30963 + if ((regs->ip + 9) >> 32)
30964 + break;
30965 +#endif
30966 +
30967 + err = get_user(mov, (unsigned char __user *)regs->ip);
30968 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
30969 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
30970 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
30971 +
30972 + if (err)
30973 + break;
30974 +
30975 + if (mov == 0xB8 && jmp == 0xE9) {
30976 + regs->ax = addr1;
30977 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
30978 + return 2;
30979 + }
30980 + } while (0);
30981 +
30982 + do { /* PaX: gcc trampoline emulation #1 */
30983 + unsigned char mov1, mov2;
30984 + unsigned short jmp;
30985 + unsigned int addr1, addr2;
30986 +
30987 +#ifdef CONFIG_X86_64
30988 + if ((regs->ip + 11) >> 32)
30989 + break;
30990 +#endif
30991 +
30992 + err = get_user(mov1, (unsigned char __user *)regs->ip);
30993 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
30994 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
30995 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
30996 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
30997 +
30998 + if (err)
30999 + break;
31000 +
31001 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
31002 + regs->cx = addr1;
31003 + regs->ax = addr2;
31004 + regs->ip = addr2;
31005 + return 2;
31006 + }
31007 + } while (0);
31008 +
31009 + do { /* PaX: gcc trampoline emulation #2 */
31010 + unsigned char mov, jmp;
31011 + unsigned int addr1, addr2;
31012 +
31013 +#ifdef CONFIG_X86_64
31014 + if ((regs->ip + 9) >> 32)
31015 + break;
31016 +#endif
31017 +
31018 + err = get_user(mov, (unsigned char __user *)regs->ip);
31019 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
31020 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
31021 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
31022 +
31023 + if (err)
31024 + break;
31025 +
31026 + if (mov == 0xB9 && jmp == 0xE9) {
31027 + regs->cx = addr1;
31028 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
31029 + return 2;
31030 + }
31031 + } while (0);
31032 +
31033 + return 1; /* PaX in action */
31034 +}
31035 +
31036 +#ifdef CONFIG_X86_64
31037 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
31038 +{
31039 + int err;
31040 +
31041 + do { /* PaX: libffi trampoline emulation */
31042 + unsigned short mov1, mov2, jmp1;
31043 + unsigned char stcclc, jmp2;
31044 + unsigned long addr1, addr2;
31045 +
31046 + err = get_user(mov1, (unsigned short __user *)regs->ip);
31047 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
31048 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
31049 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
31050 + err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
31051 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
31052 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
31053 +
31054 + if (err)
31055 + break;
31056 +
31057 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31058 + regs->r11 = addr1;
31059 + regs->r10 = addr2;
31060 + if (stcclc == 0xF8)
31061 + regs->flags &= ~X86_EFLAGS_CF;
31062 + else
31063 + regs->flags |= X86_EFLAGS_CF;
31064 + regs->ip = addr1;
31065 + return 2;
31066 + }
31067 + } while (0);
31068 +
31069 + do { /* PaX: gcc trampoline emulation #1 */
31070 + unsigned short mov1, mov2, jmp1;
31071 + unsigned char jmp2;
31072 + unsigned int addr1;
31073 + unsigned long addr2;
31074 +
31075 + err = get_user(mov1, (unsigned short __user *)regs->ip);
31076 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
31077 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
31078 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
31079 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
31080 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
31081 +
31082 + if (err)
31083 + break;
31084 +
31085 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31086 + regs->r11 = addr1;
31087 + regs->r10 = addr2;
31088 + regs->ip = addr1;
31089 + return 2;
31090 + }
31091 + } while (0);
31092 +
31093 + do { /* PaX: gcc trampoline emulation #2 */
31094 + unsigned short mov1, mov2, jmp1;
31095 + unsigned char jmp2;
31096 + unsigned long addr1, addr2;
31097 +
31098 + err = get_user(mov1, (unsigned short __user *)regs->ip);
31099 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
31100 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
31101 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
31102 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
31103 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
31104 +
31105 + if (err)
31106 + break;
31107 +
31108 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
31109 + regs->r11 = addr1;
31110 + regs->r10 = addr2;
31111 + regs->ip = addr1;
31112 + return 2;
31113 + }
31114 + } while (0);
31115 +
31116 + return 1; /* PaX in action */
31117 +}
31118 +#endif
31119 +
31120 +/*
31121 + * PaX: decide what to do with offenders (regs->ip = fault address)
31122 + *
31123 + * returns 1 when task should be killed
31124 + * 2 when gcc trampoline was detected
31125 + */
31126 +static int pax_handle_fetch_fault(struct pt_regs *regs)
31127 +{
31128 + if (v8086_mode(regs))
31129 + return 1;
31130 +
31131 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
31132 + return 1;
31133 +
31134 +#ifdef CONFIG_X86_32
31135 + return pax_handle_fetch_fault_32(regs);
31136 +#else
31137 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
31138 + return pax_handle_fetch_fault_32(regs);
31139 + else
31140 + return pax_handle_fetch_fault_64(regs);
31141 +#endif
31142 +}
31143 +#endif
31144 +
31145 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31146 +void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
31147 +{
31148 + long i;
31149 +
31150 + printk(KERN_ERR "PAX: bytes at PC: ");
31151 + for (i = 0; i < 20; i++) {
31152 + unsigned char c;
31153 + if (get_user(c, (unsigned char __force_user *)pc+i))
31154 + printk(KERN_CONT "?? ");
31155 + else
31156 + printk(KERN_CONT "%02x ", c);
31157 + }
31158 + printk("\n");
31159 +
31160 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
31161 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
31162 + unsigned long c;
31163 + if (get_user(c, (unsigned long __force_user *)sp+i)) {
31164 +#ifdef CONFIG_X86_32
31165 + printk(KERN_CONT "???????? ");
31166 +#else
31167 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
31168 + printk(KERN_CONT "???????? ???????? ");
31169 + else
31170 + printk(KERN_CONT "???????????????? ");
31171 +#endif
31172 + } else {
31173 +#ifdef CONFIG_X86_64
31174 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
31175 + printk(KERN_CONT "%08x ", (unsigned int)c);
31176 + printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
31177 + } else
31178 +#endif
31179 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
31180 + }
31181 + }
31182 + printk("\n");
31183 +}
31184 +#endif
31185 +
31186 +/**
31187 + * probe_kernel_write(): safely attempt to write to a location
31188 + * @dst: address to write to
31189 + * @src: pointer to the data that shall be written
31190 + * @size: size of the data chunk
31191 + *
31192 + * Safely write to address @dst from the buffer at @src. If a kernel fault
31193 + * happens, handle that and return -EFAULT.
31194 + */
31195 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
31196 +{
31197 + long ret;
31198 + mm_segment_t old_fs = get_fs();
31199 +
31200 + set_fs(KERNEL_DS);
31201 + pagefault_disable();
31202 + pax_open_kernel();
31203 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
31204 + pax_close_kernel();
31205 + pagefault_enable();
31206 + set_fs(old_fs);
31207 +
31208 + return ret ? -EFAULT : 0;
31209 +}
31210 diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
31211 index dd74e46..0970b01 100644
31212 --- a/arch/x86/mm/gup.c
31213 +++ b/arch/x86/mm/gup.c
31214 @@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
31215 addr = start;
31216 len = (unsigned long) nr_pages << PAGE_SHIFT;
31217 end = start + len;
31218 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
31219 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
31220 (void __user *)start, len)))
31221 return 0;
31222
31223 @@ -331,6 +331,10 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
31224 goto slow_irqon;
31225 #endif
31226
31227 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
31228 + (void __user *)start, len)))
31229 + return 0;
31230 +
31231 /*
31232 * XXX: batch / limit 'nr', to avoid large irq off latency
31233 * needs some instrumenting to determine the common sizes used by
31234 diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
31235 index 4500142..53a363c 100644
31236 --- a/arch/x86/mm/highmem_32.c
31237 +++ b/arch/x86/mm/highmem_32.c
31238 @@ -45,7 +45,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
31239 idx = type + KM_TYPE_NR*smp_processor_id();
31240 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
31241 BUG_ON(!pte_none(*(kmap_pte-idx)));
31242 +
31243 + pax_open_kernel();
31244 set_pte(kmap_pte-idx, mk_pte(page, prot));
31245 + pax_close_kernel();
31246 +
31247 arch_flush_lazy_mmu_mode();
31248
31249 return (void *)vaddr;
31250 diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
31251 index 9d980d8..6bbfacb 100644
31252 --- a/arch/x86/mm/hugetlbpage.c
31253 +++ b/arch/x86/mm/hugetlbpage.c
31254 @@ -92,23 +92,30 @@ int pmd_huge_support(void)
31255 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
31256 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
31257 unsigned long addr, unsigned long len,
31258 - unsigned long pgoff, unsigned long flags)
31259 + unsigned long pgoff, unsigned long flags, unsigned long offset)
31260 {
31261 struct hstate *h = hstate_file(file);
31262 struct vm_unmapped_area_info info;
31263 -
31264 +
31265 info.flags = 0;
31266 info.length = len;
31267 info.low_limit = TASK_UNMAPPED_BASE;
31268 +
31269 +#ifdef CONFIG_PAX_RANDMMAP
31270 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
31271 + info.low_limit += current->mm->delta_mmap;
31272 +#endif
31273 +
31274 info.high_limit = TASK_SIZE;
31275 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
31276 info.align_offset = 0;
31277 + info.threadstack_offset = offset;
31278 return vm_unmapped_area(&info);
31279 }
31280
31281 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
31282 unsigned long addr0, unsigned long len,
31283 - unsigned long pgoff, unsigned long flags)
31284 + unsigned long pgoff, unsigned long flags, unsigned long offset)
31285 {
31286 struct hstate *h = hstate_file(file);
31287 struct vm_unmapped_area_info info;
31288 @@ -120,6 +127,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
31289 info.high_limit = current->mm->mmap_base;
31290 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
31291 info.align_offset = 0;
31292 + info.threadstack_offset = offset;
31293 addr = vm_unmapped_area(&info);
31294
31295 /*
31296 @@ -132,6 +140,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
31297 VM_BUG_ON(addr != -ENOMEM);
31298 info.flags = 0;
31299 info.low_limit = TASK_UNMAPPED_BASE;
31300 +
31301 +#ifdef CONFIG_PAX_RANDMMAP
31302 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
31303 + info.low_limit += current->mm->delta_mmap;
31304 +#endif
31305 +
31306 info.high_limit = TASK_SIZE;
31307 addr = vm_unmapped_area(&info);
31308 }
31309 @@ -146,10 +160,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
31310 struct hstate *h = hstate_file(file);
31311 struct mm_struct *mm = current->mm;
31312 struct vm_area_struct *vma;
31313 + unsigned long pax_task_size = TASK_SIZE;
31314 + unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
31315
31316 if (len & ~huge_page_mask(h))
31317 return -EINVAL;
31318 - if (len > TASK_SIZE)
31319 +
31320 +#ifdef CONFIG_PAX_SEGMEXEC
31321 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
31322 + pax_task_size = SEGMEXEC_TASK_SIZE;
31323 +#endif
31324 +
31325 + pax_task_size -= PAGE_SIZE;
31326 +
31327 + if (len > pax_task_size)
31328 return -ENOMEM;
31329
31330 if (flags & MAP_FIXED) {
31331 @@ -158,19 +182,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
31332 return addr;
31333 }
31334
31335 +#ifdef CONFIG_PAX_RANDMMAP
31336 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
31337 +#endif
31338 +
31339 if (addr) {
31340 addr = ALIGN(addr, huge_page_size(h));
31341 vma = find_vma(mm, addr);
31342 - if (TASK_SIZE - len >= addr &&
31343 - (!vma || addr + len <= vma->vm_start))
31344 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
31345 return addr;
31346 }
31347 if (mm->get_unmapped_area == arch_get_unmapped_area)
31348 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
31349 - pgoff, flags);
31350 + pgoff, flags, offset);
31351 else
31352 return hugetlb_get_unmapped_area_topdown(file, addr, len,
31353 - pgoff, flags);
31354 + pgoff, flags, offset);
31355 }
31356
31357 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
31358 diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
31359 index 04664cd..dae6e5d 100644
31360 --- a/arch/x86/mm/init.c
31361 +++ b/arch/x86/mm/init.c
31362 @@ -4,6 +4,7 @@
31363 #include <linux/swap.h>
31364 #include <linux/memblock.h>
31365 #include <linux/bootmem.h> /* for max_low_pfn */
31366 +#include <linux/tboot.h>
31367
31368 #include <asm/cacheflush.h>
31369 #include <asm/e820.h>
31370 @@ -17,6 +18,8 @@
31371 #include <asm/proto.h>
31372 #include <asm/dma.h> /* for MAX_DMA_PFN */
31373 #include <asm/microcode.h>
31374 +#include <asm/desc.h>
31375 +#include <asm/bios_ebda.h>
31376
31377 #include "mm_internal.h"
31378
31379 @@ -465,7 +468,18 @@ void __init init_mem_mapping(void)
31380 early_ioremap_page_table_range_init();
31381 #endif
31382
31383 +#ifdef CONFIG_PAX_PER_CPU_PGD
31384 + clone_pgd_range(get_cpu_pgd(0, kernel) + KERNEL_PGD_BOUNDARY,
31385 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
31386 + KERNEL_PGD_PTRS);
31387 + clone_pgd_range(get_cpu_pgd(0, user) + KERNEL_PGD_BOUNDARY,
31388 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
31389 + KERNEL_PGD_PTRS);
31390 + load_cr3(get_cpu_pgd(0, kernel));
31391 +#else
31392 load_cr3(swapper_pg_dir);
31393 +#endif
31394 +
31395 __flush_tlb_all();
31396
31397 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
31398 @@ -481,10 +495,40 @@ void __init init_mem_mapping(void)
31399 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
31400 * mmio resources as well as potential bios/acpi data regions.
31401 */
31402 +
31403 +#ifdef CONFIG_GRKERNSEC_KMEM
31404 +static unsigned int ebda_start __read_only;
31405 +static unsigned int ebda_end __read_only;
31406 +#endif
31407 +
31408 int devmem_is_allowed(unsigned long pagenr)
31409 {
31410 - if (pagenr < 256)
31411 +#ifdef CONFIG_GRKERNSEC_KMEM
31412 + /* allow BDA */
31413 + if (!pagenr)
31414 return 1;
31415 + /* allow EBDA */
31416 + if (pagenr >= ebda_start && pagenr < ebda_end)
31417 + return 1;
31418 + /* if tboot is in use, allow access to its hardcoded serial log range */
31419 + if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
31420 + return 1;
31421 +#else
31422 + if (!pagenr)
31423 + return 1;
31424 +#ifdef CONFIG_VM86
31425 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
31426 + return 1;
31427 +#endif
31428 +#endif
31429 +
31430 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
31431 + return 1;
31432 +#ifdef CONFIG_GRKERNSEC_KMEM
31433 + /* throw out everything else below 1MB */
31434 + if (pagenr <= 256)
31435 + return 0;
31436 +#endif
31437 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
31438 return 0;
31439 if (!page_is_ram(pagenr))
31440 @@ -530,8 +574,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
31441 #endif
31442 }
31443
31444 +#ifdef CONFIG_GRKERNSEC_KMEM
31445 +static inline void gr_init_ebda(void)
31446 +{
31447 + unsigned int ebda_addr;
31448 + unsigned int ebda_size = 0;
31449 +
31450 + ebda_addr = get_bios_ebda();
31451 + if (ebda_addr) {
31452 + ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
31453 + ebda_size <<= 10;
31454 + }
31455 + if (ebda_addr && ebda_size) {
31456 + ebda_start = ebda_addr >> PAGE_SHIFT;
31457 + ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
31458 + } else {
31459 + ebda_start = 0x9f000 >> PAGE_SHIFT;
31460 + ebda_end = 0xa0000 >> PAGE_SHIFT;
31461 + }
31462 +}
31463 +#else
31464 +static inline void gr_init_ebda(void) { }
31465 +#endif
31466 +
31467 void free_initmem(void)
31468 {
31469 +#ifdef CONFIG_PAX_KERNEXEC
31470 +#ifdef CONFIG_X86_32
31471 + /* PaX: limit KERNEL_CS to actual size */
31472 + unsigned long addr, limit;
31473 + struct desc_struct d;
31474 + int cpu;
31475 +#else
31476 + pgd_t *pgd;
31477 + pud_t *pud;
31478 + pmd_t *pmd;
31479 + unsigned long addr, end;
31480 +#endif
31481 +#endif
31482 +
31483 + gr_init_ebda();
31484 +
31485 +#ifdef CONFIG_PAX_KERNEXEC
31486 +#ifdef CONFIG_X86_32
31487 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
31488 + limit = (limit - 1UL) >> PAGE_SHIFT;
31489 +
31490 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
31491 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
31492 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
31493 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
31494 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
31495 + }
31496 +
31497 + /* PaX: make KERNEL_CS read-only */
31498 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
31499 + if (!paravirt_enabled())
31500 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
31501 +/*
31502 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
31503 + pgd = pgd_offset_k(addr);
31504 + pud = pud_offset(pgd, addr);
31505 + pmd = pmd_offset(pud, addr);
31506 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
31507 + }
31508 +*/
31509 +#ifdef CONFIG_X86_PAE
31510 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
31511 +/*
31512 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
31513 + pgd = pgd_offset_k(addr);
31514 + pud = pud_offset(pgd, addr);
31515 + pmd = pmd_offset(pud, addr);
31516 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
31517 + }
31518 +*/
31519 +#endif
31520 +
31521 +#ifdef CONFIG_MODULES
31522 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
31523 +#endif
31524 +
31525 +#else
31526 + /* PaX: make kernel code/rodata read-only, rest non-executable */
31527 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
31528 + pgd = pgd_offset_k(addr);
31529 + pud = pud_offset(pgd, addr);
31530 + pmd = pmd_offset(pud, addr);
31531 + if (!pmd_present(*pmd))
31532 + continue;
31533 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
31534 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
31535 + else
31536 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
31537 + }
31538 +
31539 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
31540 + end = addr + KERNEL_IMAGE_SIZE;
31541 + for (; addr < end; addr += PMD_SIZE) {
31542 + pgd = pgd_offset_k(addr);
31543 + pud = pud_offset(pgd, addr);
31544 + pmd = pmd_offset(pud, addr);
31545 + if (!pmd_present(*pmd))
31546 + continue;
31547 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
31548 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
31549 + }
31550 +#endif
31551 +
31552 + flush_tlb_all();
31553 +#endif
31554 +
31555 free_init_pages("unused kernel",
31556 (unsigned long)(&__init_begin),
31557 (unsigned long)(&__init_end));
31558 diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
31559 index 4287f1f..3b99c71 100644
31560 --- a/arch/x86/mm/init_32.c
31561 +++ b/arch/x86/mm/init_32.c
31562 @@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
31563 bool __read_mostly __vmalloc_start_set = false;
31564
31565 /*
31566 - * Creates a middle page table and puts a pointer to it in the
31567 - * given global directory entry. This only returns the gd entry
31568 - * in non-PAE compilation mode, since the middle layer is folded.
31569 - */
31570 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
31571 -{
31572 - pud_t *pud;
31573 - pmd_t *pmd_table;
31574 -
31575 -#ifdef CONFIG_X86_PAE
31576 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
31577 - pmd_table = (pmd_t *)alloc_low_page();
31578 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
31579 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
31580 - pud = pud_offset(pgd, 0);
31581 - BUG_ON(pmd_table != pmd_offset(pud, 0));
31582 -
31583 - return pmd_table;
31584 - }
31585 -#endif
31586 - pud = pud_offset(pgd, 0);
31587 - pmd_table = pmd_offset(pud, 0);
31588 -
31589 - return pmd_table;
31590 -}
31591 -
31592 -/*
31593 * Create a page table and place a pointer to it in a middle page
31594 * directory entry:
31595 */
31596 @@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
31597 pte_t *page_table = (pte_t *)alloc_low_page();
31598
31599 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
31600 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
31601 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
31602 +#else
31603 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
31604 +#endif
31605 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
31606 }
31607
31608 return pte_offset_kernel(pmd, 0);
31609 }
31610
31611 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
31612 +{
31613 + pud_t *pud;
31614 + pmd_t *pmd_table;
31615 +
31616 + pud = pud_offset(pgd, 0);
31617 + pmd_table = pmd_offset(pud, 0);
31618 +
31619 + return pmd_table;
31620 +}
31621 +
31622 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
31623 {
31624 int pgd_idx = pgd_index(vaddr);
31625 @@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31626 int pgd_idx, pmd_idx;
31627 unsigned long vaddr;
31628 pgd_t *pgd;
31629 + pud_t *pud;
31630 pmd_t *pmd;
31631 pte_t *pte = NULL;
31632 unsigned long count = page_table_range_init_count(start, end);
31633 @@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31634 pgd = pgd_base + pgd_idx;
31635
31636 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
31637 - pmd = one_md_table_init(pgd);
31638 - pmd = pmd + pmd_index(vaddr);
31639 + pud = pud_offset(pgd, vaddr);
31640 + pmd = pmd_offset(pud, vaddr);
31641 +
31642 +#ifdef CONFIG_X86_PAE
31643 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
31644 +#endif
31645 +
31646 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
31647 pmd++, pmd_idx++) {
31648 pte = page_table_kmap_check(one_page_table_init(pmd),
31649 @@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
31650 }
31651 }
31652
31653 -static inline int is_kernel_text(unsigned long addr)
31654 +static inline int is_kernel_text(unsigned long start, unsigned long end)
31655 {
31656 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
31657 - return 1;
31658 - return 0;
31659 + if ((start > ktla_ktva((unsigned long)_etext) ||
31660 + end <= ktla_ktva((unsigned long)_stext)) &&
31661 + (start > ktla_ktva((unsigned long)_einittext) ||
31662 + end <= ktla_ktva((unsigned long)_sinittext)) &&
31663 +
31664 +#ifdef CONFIG_ACPI_SLEEP
31665 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
31666 +#endif
31667 +
31668 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
31669 + return 0;
31670 + return 1;
31671 }
31672
31673 /*
31674 @@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
31675 unsigned long last_map_addr = end;
31676 unsigned long start_pfn, end_pfn;
31677 pgd_t *pgd_base = swapper_pg_dir;
31678 - int pgd_idx, pmd_idx, pte_ofs;
31679 + unsigned int pgd_idx, pmd_idx, pte_ofs;
31680 unsigned long pfn;
31681 pgd_t *pgd;
31682 + pud_t *pud;
31683 pmd_t *pmd;
31684 pte_t *pte;
31685 unsigned pages_2m, pages_4k;
31686 @@ -291,8 +295,13 @@ repeat:
31687 pfn = start_pfn;
31688 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
31689 pgd = pgd_base + pgd_idx;
31690 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
31691 - pmd = one_md_table_init(pgd);
31692 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
31693 + pud = pud_offset(pgd, 0);
31694 + pmd = pmd_offset(pud, 0);
31695 +
31696 +#ifdef CONFIG_X86_PAE
31697 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
31698 +#endif
31699
31700 if (pfn >= end_pfn)
31701 continue;
31702 @@ -304,14 +313,13 @@ repeat:
31703 #endif
31704 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
31705 pmd++, pmd_idx++) {
31706 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
31707 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
31708
31709 /*
31710 * Map with big pages if possible, otherwise
31711 * create normal page tables:
31712 */
31713 if (use_pse) {
31714 - unsigned int addr2;
31715 pgprot_t prot = PAGE_KERNEL_LARGE;
31716 /*
31717 * first pass will use the same initial
31718 @@ -322,11 +330,7 @@ repeat:
31719 _PAGE_PSE);
31720
31721 pfn &= PMD_MASK >> PAGE_SHIFT;
31722 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
31723 - PAGE_OFFSET + PAGE_SIZE-1;
31724 -
31725 - if (is_kernel_text(addr) ||
31726 - is_kernel_text(addr2))
31727 + if (is_kernel_text(address, address + PMD_SIZE))
31728 prot = PAGE_KERNEL_LARGE_EXEC;
31729
31730 pages_2m++;
31731 @@ -343,7 +347,7 @@ repeat:
31732 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
31733 pte += pte_ofs;
31734 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
31735 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
31736 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
31737 pgprot_t prot = PAGE_KERNEL;
31738 /*
31739 * first pass will use the same initial
31740 @@ -351,7 +355,7 @@ repeat:
31741 */
31742 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
31743
31744 - if (is_kernel_text(addr))
31745 + if (is_kernel_text(address, address + PAGE_SIZE))
31746 prot = PAGE_KERNEL_EXEC;
31747
31748 pages_4k++;
31749 @@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
31750
31751 pud = pud_offset(pgd, va);
31752 pmd = pmd_offset(pud, va);
31753 - if (!pmd_present(*pmd))
31754 + if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
31755 break;
31756
31757 /* should not be large page here */
31758 @@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
31759
31760 static void __init pagetable_init(void)
31761 {
31762 - pgd_t *pgd_base = swapper_pg_dir;
31763 -
31764 - permanent_kmaps_init(pgd_base);
31765 + permanent_kmaps_init(swapper_pg_dir);
31766 }
31767
31768 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
31769 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
31770 EXPORT_SYMBOL_GPL(__supported_pte_mask);
31771
31772 /* user-defined highmem size */
31773 @@ -787,10 +789,10 @@ void __init mem_init(void)
31774 ((unsigned long)&__init_end -
31775 (unsigned long)&__init_begin) >> 10,
31776
31777 - (unsigned long)&_etext, (unsigned long)&_edata,
31778 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
31779 + (unsigned long)&_sdata, (unsigned long)&_edata,
31780 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
31781
31782 - (unsigned long)&_text, (unsigned long)&_etext,
31783 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
31784 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
31785
31786 /*
31787 @@ -880,6 +882,7 @@ void set_kernel_text_rw(void)
31788 if (!kernel_set_to_readonly)
31789 return;
31790
31791 + start = ktla_ktva(start);
31792 pr_debug("Set kernel text: %lx - %lx for read write\n",
31793 start, start+size);
31794
31795 @@ -894,6 +897,7 @@ void set_kernel_text_ro(void)
31796 if (!kernel_set_to_readonly)
31797 return;
31798
31799 + start = ktla_ktva(start);
31800 pr_debug("Set kernel text: %lx - %lx for read only\n",
31801 start, start+size);
31802
31803 @@ -922,6 +926,7 @@ void mark_rodata_ro(void)
31804 unsigned long start = PFN_ALIGN(_text);
31805 unsigned long size = PFN_ALIGN(_etext) - start;
31806
31807 + start = ktla_ktva(start);
31808 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
31809 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
31810 size >> 10);
31811 diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
31812 index 104d56a..62ba13f1 100644
31813 --- a/arch/x86/mm/init_64.c
31814 +++ b/arch/x86/mm/init_64.c
31815 @@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
31816 * around without checking the pgd every time.
31817 */
31818
31819 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
31820 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
31821 EXPORT_SYMBOL_GPL(__supported_pte_mask);
31822
31823 int force_personality32;
31824 @@ -184,12 +184,29 @@ void sync_global_pgds(unsigned long start, unsigned long end)
31825
31826 for (address = start; address <= end; address += PGDIR_SIZE) {
31827 const pgd_t *pgd_ref = pgd_offset_k(address);
31828 +
31829 +#ifdef CONFIG_PAX_PER_CPU_PGD
31830 + unsigned long cpu;
31831 +#else
31832 struct page *page;
31833 +#endif
31834
31835 if (pgd_none(*pgd_ref))
31836 continue;
31837
31838 spin_lock(&pgd_lock);
31839 +
31840 +#ifdef CONFIG_PAX_PER_CPU_PGD
31841 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
31842 + pgd_t *pgd = pgd_offset_cpu(cpu, user, address);
31843 +
31844 + if (pgd_none(*pgd))
31845 + set_pgd(pgd, *pgd_ref);
31846 + else
31847 + BUG_ON(pgd_page_vaddr(*pgd)
31848 + != pgd_page_vaddr(*pgd_ref));
31849 + pgd = pgd_offset_cpu(cpu, kernel, address);
31850 +#else
31851 list_for_each_entry(page, &pgd_list, lru) {
31852 pgd_t *pgd;
31853 spinlock_t *pgt_lock;
31854 @@ -198,6 +215,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
31855 /* the pgt_lock only for Xen */
31856 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
31857 spin_lock(pgt_lock);
31858 +#endif
31859
31860 if (pgd_none(*pgd))
31861 set_pgd(pgd, *pgd_ref);
31862 @@ -205,7 +223,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
31863 BUG_ON(pgd_page_vaddr(*pgd)
31864 != pgd_page_vaddr(*pgd_ref));
31865
31866 +#ifndef CONFIG_PAX_PER_CPU_PGD
31867 spin_unlock(pgt_lock);
31868 +#endif
31869 +
31870 }
31871 spin_unlock(&pgd_lock);
31872 }
31873 @@ -238,7 +259,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
31874 {
31875 if (pgd_none(*pgd)) {
31876 pud_t *pud = (pud_t *)spp_getpage();
31877 - pgd_populate(&init_mm, pgd, pud);
31878 + pgd_populate_kernel(&init_mm, pgd, pud);
31879 if (pud != pud_offset(pgd, 0))
31880 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
31881 pud, pud_offset(pgd, 0));
31882 @@ -250,7 +271,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
31883 {
31884 if (pud_none(*pud)) {
31885 pmd_t *pmd = (pmd_t *) spp_getpage();
31886 - pud_populate(&init_mm, pud, pmd);
31887 + pud_populate_kernel(&init_mm, pud, pmd);
31888 if (pmd != pmd_offset(pud, 0))
31889 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
31890 pmd, pmd_offset(pud, 0));
31891 @@ -279,7 +300,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
31892 pmd = fill_pmd(pud, vaddr);
31893 pte = fill_pte(pmd, vaddr);
31894
31895 + pax_open_kernel();
31896 set_pte(pte, new_pte);
31897 + pax_close_kernel();
31898
31899 /*
31900 * It's enough to flush this one mapping.
31901 @@ -338,14 +361,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
31902 pgd = pgd_offset_k((unsigned long)__va(phys));
31903 if (pgd_none(*pgd)) {
31904 pud = (pud_t *) spp_getpage();
31905 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
31906 - _PAGE_USER));
31907 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
31908 }
31909 pud = pud_offset(pgd, (unsigned long)__va(phys));
31910 if (pud_none(*pud)) {
31911 pmd = (pmd_t *) spp_getpage();
31912 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
31913 - _PAGE_USER));
31914 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
31915 }
31916 pmd = pmd_offset(pud, phys);
31917 BUG_ON(!pmd_none(*pmd));
31918 @@ -586,7 +607,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
31919 prot);
31920
31921 spin_lock(&init_mm.page_table_lock);
31922 - pud_populate(&init_mm, pud, pmd);
31923 + pud_populate_kernel(&init_mm, pud, pmd);
31924 spin_unlock(&init_mm.page_table_lock);
31925 }
31926 __flush_tlb_all();
31927 @@ -627,7 +648,7 @@ kernel_physical_mapping_init(unsigned long start,
31928 page_size_mask);
31929
31930 spin_lock(&init_mm.page_table_lock);
31931 - pgd_populate(&init_mm, pgd, pud);
31932 + pgd_populate_kernel(&init_mm, pgd, pud);
31933 spin_unlock(&init_mm.page_table_lock);
31934 pgd_changed = true;
31935 }
31936 @@ -1188,8 +1209,8 @@ int kern_addr_valid(unsigned long addr)
31937 static struct vm_area_struct gate_vma = {
31938 .vm_start = VSYSCALL_START,
31939 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
31940 - .vm_page_prot = PAGE_READONLY_EXEC,
31941 - .vm_flags = VM_READ | VM_EXEC
31942 + .vm_page_prot = PAGE_READONLY,
31943 + .vm_flags = VM_READ
31944 };
31945
31946 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
31947 @@ -1223,7 +1244,7 @@ int in_gate_area_no_mm(unsigned long addr)
31948
31949 const char *arch_vma_name(struct vm_area_struct *vma)
31950 {
31951 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
31952 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
31953 return "[vdso]";
31954 if (vma == &gate_vma)
31955 return "[vsyscall]";
31956 diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
31957 index 7b179b4..6bd17777 100644
31958 --- a/arch/x86/mm/iomap_32.c
31959 +++ b/arch/x86/mm/iomap_32.c
31960 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
31961 type = kmap_atomic_idx_push();
31962 idx = type + KM_TYPE_NR * smp_processor_id();
31963 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
31964 +
31965 + pax_open_kernel();
31966 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
31967 + pax_close_kernel();
31968 +
31969 arch_flush_lazy_mmu_mode();
31970
31971 return (void *)vaddr;
31972 diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
31973 index 799580c..72f9fe0 100644
31974 --- a/arch/x86/mm/ioremap.c
31975 +++ b/arch/x86/mm/ioremap.c
31976 @@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
31977 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
31978 int is_ram = page_is_ram(pfn);
31979
31980 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
31981 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
31982 return NULL;
31983 WARN_ON_ONCE(is_ram);
31984 }
31985 @@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
31986 *
31987 * Caller must ensure there is only one unmapping for the same pointer.
31988 */
31989 -void iounmap(volatile void __iomem *addr)
31990 +void iounmap(const volatile void __iomem *addr)
31991 {
31992 struct vm_struct *p, *o;
31993
31994 @@ -310,6 +310,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
31995
31996 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
31997 if (page_is_ram(start >> PAGE_SHIFT))
31998 +#ifdef CONFIG_HIGHMEM
31999 + if ((start >> PAGE_SHIFT) < max_low_pfn)
32000 +#endif
32001 return __va(phys);
32002
32003 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
32004 @@ -322,6 +325,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
32005 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
32006 {
32007 if (page_is_ram(phys >> PAGE_SHIFT))
32008 +#ifdef CONFIG_HIGHMEM
32009 + if ((phys >> PAGE_SHIFT) < max_low_pfn)
32010 +#endif
32011 return;
32012
32013 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
32014 @@ -339,7 +345,7 @@ static int __init early_ioremap_debug_setup(char *str)
32015 early_param("early_ioremap_debug", early_ioremap_debug_setup);
32016
32017 static __initdata int after_paging_init;
32018 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
32019 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
32020
32021 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
32022 {
32023 @@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
32024 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
32025
32026 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
32027 - memset(bm_pte, 0, sizeof(bm_pte));
32028 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
32029 + pmd_populate_user(&init_mm, pmd, bm_pte);
32030
32031 /*
32032 * The boot-ioremap range spans multiple pmds, for which
32033 diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
32034 index d87dd6d..bf3fa66 100644
32035 --- a/arch/x86/mm/kmemcheck/kmemcheck.c
32036 +++ b/arch/x86/mm/kmemcheck/kmemcheck.c
32037 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
32038 * memory (e.g. tracked pages)? For now, we need this to avoid
32039 * invoking kmemcheck for PnP BIOS calls.
32040 */
32041 - if (regs->flags & X86_VM_MASK)
32042 + if (v8086_mode(regs))
32043 return false;
32044 - if (regs->cs != __KERNEL_CS)
32045 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
32046 return false;
32047
32048 pte = kmemcheck_pte_lookup(address);
32049 diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
32050 index 25e7e13..1964579 100644
32051 --- a/arch/x86/mm/mmap.c
32052 +++ b/arch/x86/mm/mmap.c
32053 @@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
32054 * Leave an at least ~128 MB hole with possible stack randomization.
32055 */
32056 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
32057 -#define MAX_GAP (TASK_SIZE/6*5)
32058 +#define MAX_GAP (pax_task_size/6*5)
32059
32060 static int mmap_is_legacy(void)
32061 {
32062 @@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
32063 return rnd << PAGE_SHIFT;
32064 }
32065
32066 -static unsigned long mmap_base(void)
32067 +static unsigned long mmap_base(struct mm_struct *mm)
32068 {
32069 unsigned long gap = rlimit(RLIMIT_STACK);
32070 + unsigned long pax_task_size = TASK_SIZE;
32071 +
32072 +#ifdef CONFIG_PAX_SEGMEXEC
32073 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
32074 + pax_task_size = SEGMEXEC_TASK_SIZE;
32075 +#endif
32076
32077 if (gap < MIN_GAP)
32078 gap = MIN_GAP;
32079 else if (gap > MAX_GAP)
32080 gap = MAX_GAP;
32081
32082 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
32083 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
32084 }
32085
32086 /*
32087 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
32088 * does, but not when emulating X86_32
32089 */
32090 -static unsigned long mmap_legacy_base(void)
32091 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
32092 {
32093 - if (mmap_is_ia32())
32094 + if (mmap_is_ia32()) {
32095 +
32096 +#ifdef CONFIG_PAX_SEGMEXEC
32097 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
32098 + return SEGMEXEC_TASK_UNMAPPED_BASE;
32099 + else
32100 +#endif
32101 +
32102 return TASK_UNMAPPED_BASE;
32103 - else
32104 + } else
32105 return TASK_UNMAPPED_BASE + mmap_rnd();
32106 }
32107
32108 @@ -112,8 +125,15 @@ static unsigned long mmap_legacy_base(void)
32109 */
32110 void arch_pick_mmap_layout(struct mm_struct *mm)
32111 {
32112 - mm->mmap_legacy_base = mmap_legacy_base();
32113 - mm->mmap_base = mmap_base();
32114 + mm->mmap_legacy_base = mmap_legacy_base(mm);
32115 + mm->mmap_base = mmap_base(mm);
32116 +
32117 +#ifdef CONFIG_PAX_RANDMMAP
32118 + if (mm->pax_flags & MF_PAX_RANDMMAP) {
32119 + mm->mmap_legacy_base += mm->delta_mmap;
32120 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
32121 + }
32122 +#endif
32123
32124 if (mmap_is_legacy()) {
32125 mm->mmap_base = mm->mmap_legacy_base;
32126 diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
32127 index 0057a7a..95c7edd 100644
32128 --- a/arch/x86/mm/mmio-mod.c
32129 +++ b/arch/x86/mm/mmio-mod.c
32130 @@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
32131 break;
32132 default:
32133 {
32134 - unsigned char *ip = (unsigned char *)instptr;
32135 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
32136 my_trace->opcode = MMIO_UNKNOWN_OP;
32137 my_trace->width = 0;
32138 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
32139 @@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
32140 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
32141 void __iomem *addr)
32142 {
32143 - static atomic_t next_id;
32144 + static atomic_unchecked_t next_id;
32145 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
32146 /* These are page-unaligned. */
32147 struct mmiotrace_map map = {
32148 @@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
32149 .private = trace
32150 },
32151 .phys = offset,
32152 - .id = atomic_inc_return(&next_id)
32153 + .id = atomic_inc_return_unchecked(&next_id)
32154 };
32155 map.map_id = trace->id;
32156
32157 @@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
32158 ioremap_trace_core(offset, size, addr);
32159 }
32160
32161 -static void iounmap_trace_core(volatile void __iomem *addr)
32162 +static void iounmap_trace_core(const volatile void __iomem *addr)
32163 {
32164 struct mmiotrace_map map = {
32165 .phys = 0,
32166 @@ -328,7 +328,7 @@ not_enabled:
32167 }
32168 }
32169
32170 -void mmiotrace_iounmap(volatile void __iomem *addr)
32171 +void mmiotrace_iounmap(const volatile void __iomem *addr)
32172 {
32173 might_sleep();
32174 if (is_enabled()) /* recheck and proper locking in *_core() */
32175 diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
32176 index 8bf93ba..dbcd670 100644
32177 --- a/arch/x86/mm/numa.c
32178 +++ b/arch/x86/mm/numa.c
32179 @@ -474,7 +474,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
32180 return true;
32181 }
32182
32183 -static int __init numa_register_memblks(struct numa_meminfo *mi)
32184 +static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
32185 {
32186 unsigned long uninitialized_var(pfn_align);
32187 int i, nid;
32188 diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
32189 index d0b1773..4c3327c 100644
32190 --- a/arch/x86/mm/pageattr-test.c
32191 +++ b/arch/x86/mm/pageattr-test.c
32192 @@ -36,7 +36,7 @@ enum {
32193
32194 static int pte_testbit(pte_t pte)
32195 {
32196 - return pte_flags(pte) & _PAGE_UNUSED1;
32197 + return pte_flags(pte) & _PAGE_CPA_TEST;
32198 }
32199
32200 struct split_state {
32201 diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
32202 index bb32480..75f2f5e 100644
32203 --- a/arch/x86/mm/pageattr.c
32204 +++ b/arch/x86/mm/pageattr.c
32205 @@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32206 */
32207 #ifdef CONFIG_PCI_BIOS
32208 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
32209 - pgprot_val(forbidden) |= _PAGE_NX;
32210 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32211 #endif
32212
32213 /*
32214 @@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32215 * Does not cover __inittext since that is gone later on. On
32216 * 64bit we do not enforce !NX on the low mapping
32217 */
32218 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
32219 - pgprot_val(forbidden) |= _PAGE_NX;
32220 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
32221 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32222
32223 +#ifdef CONFIG_DEBUG_RODATA
32224 /*
32225 * The .rodata section needs to be read-only. Using the pfn
32226 * catches all aliases.
32227 @@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32228 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
32229 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
32230 pgprot_val(forbidden) |= _PAGE_RW;
32231 +#endif
32232
32233 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
32234 /*
32235 @@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
32236 }
32237 #endif
32238
32239 +#ifdef CONFIG_PAX_KERNEXEC
32240 + if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
32241 + pgprot_val(forbidden) |= _PAGE_RW;
32242 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
32243 + }
32244 +#endif
32245 +
32246 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
32247
32248 return prot;
32249 @@ -400,23 +409,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
32250 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
32251 {
32252 /* change init_mm */
32253 + pax_open_kernel();
32254 set_pte_atomic(kpte, pte);
32255 +
32256 #ifdef CONFIG_X86_32
32257 if (!SHARED_KERNEL_PMD) {
32258 +
32259 +#ifdef CONFIG_PAX_PER_CPU_PGD
32260 + unsigned long cpu;
32261 +#else
32262 struct page *page;
32263 +#endif
32264
32265 +#ifdef CONFIG_PAX_PER_CPU_PGD
32266 + for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
32267 + pgd_t *pgd = get_cpu_pgd(cpu, kernel);
32268 +#else
32269 list_for_each_entry(page, &pgd_list, lru) {
32270 - pgd_t *pgd;
32271 + pgd_t *pgd = (pgd_t *)page_address(page);
32272 +#endif
32273 +
32274 pud_t *pud;
32275 pmd_t *pmd;
32276
32277 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
32278 + pgd += pgd_index(address);
32279 pud = pud_offset(pgd, address);
32280 pmd = pmd_offset(pud, address);
32281 set_pte_atomic((pte_t *)pmd, pte);
32282 }
32283 }
32284 #endif
32285 + pax_close_kernel();
32286 }
32287
32288 static int
32289 diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
32290 index 6574388..87e9bef 100644
32291 --- a/arch/x86/mm/pat.c
32292 +++ b/arch/x86/mm/pat.c
32293 @@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
32294
32295 if (!entry) {
32296 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
32297 - current->comm, current->pid, start, end - 1);
32298 + current->comm, task_pid_nr(current), start, end - 1);
32299 return -EINVAL;
32300 }
32301
32302 @@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
32303
32304 while (cursor < to) {
32305 if (!devmem_is_allowed(pfn)) {
32306 - printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
32307 - current->comm, from, to - 1);
32308 + printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
32309 + current->comm, from, to - 1, cursor);
32310 return 0;
32311 }
32312 cursor += PAGE_SIZE;
32313 @@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
32314 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
32315 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
32316 "for [mem %#010Lx-%#010Lx]\n",
32317 - current->comm, current->pid,
32318 + current->comm, task_pid_nr(current),
32319 cattr_name(flags),
32320 base, (unsigned long long)(base + size-1));
32321 return -EINVAL;
32322 @@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
32323 flags = lookup_memtype(paddr);
32324 if (want_flags != flags) {
32325 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
32326 - current->comm, current->pid,
32327 + current->comm, task_pid_nr(current),
32328 cattr_name(want_flags),
32329 (unsigned long long)paddr,
32330 (unsigned long long)(paddr + size - 1),
32331 @@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
32332 free_memtype(paddr, paddr + size);
32333 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
32334 " for [mem %#010Lx-%#010Lx], got %s\n",
32335 - current->comm, current->pid,
32336 + current->comm, task_pid_nr(current),
32337 cattr_name(want_flags),
32338 (unsigned long long)paddr,
32339 (unsigned long long)(paddr + size - 1),
32340 diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
32341 index 415f6c4..d319983 100644
32342 --- a/arch/x86/mm/pat_rbtree.c
32343 +++ b/arch/x86/mm/pat_rbtree.c
32344 @@ -160,7 +160,7 @@ success:
32345
32346 failure:
32347 printk(KERN_INFO "%s:%d conflicting memory types "
32348 - "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
32349 + "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
32350 end, cattr_name(found_type), cattr_name(match->type));
32351 return -EBUSY;
32352 }
32353 diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
32354 index 9f0614d..92ae64a 100644
32355 --- a/arch/x86/mm/pf_in.c
32356 +++ b/arch/x86/mm/pf_in.c
32357 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
32358 int i;
32359 enum reason_type rv = OTHERS;
32360
32361 - p = (unsigned char *)ins_addr;
32362 + p = (unsigned char *)ktla_ktva(ins_addr);
32363 p += skip_prefix(p, &prf);
32364 p += get_opcode(p, &opcode);
32365
32366 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
32367 struct prefix_bits prf;
32368 int i;
32369
32370 - p = (unsigned char *)ins_addr;
32371 + p = (unsigned char *)ktla_ktva(ins_addr);
32372 p += skip_prefix(p, &prf);
32373 p += get_opcode(p, &opcode);
32374
32375 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
32376 struct prefix_bits prf;
32377 int i;
32378
32379 - p = (unsigned char *)ins_addr;
32380 + p = (unsigned char *)ktla_ktva(ins_addr);
32381 p += skip_prefix(p, &prf);
32382 p += get_opcode(p, &opcode);
32383
32384 @@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
32385 struct prefix_bits prf;
32386 int i;
32387
32388 - p = (unsigned char *)ins_addr;
32389 + p = (unsigned char *)ktla_ktva(ins_addr);
32390 p += skip_prefix(p, &prf);
32391 p += get_opcode(p, &opcode);
32392 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
32393 @@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
32394 struct prefix_bits prf;
32395 int i;
32396
32397 - p = (unsigned char *)ins_addr;
32398 + p = (unsigned char *)ktla_ktva(ins_addr);
32399 p += skip_prefix(p, &prf);
32400 p += get_opcode(p, &opcode);
32401 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
32402 diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
32403 index dfa537a..fd45c64 100644
32404 --- a/arch/x86/mm/pgtable.c
32405 +++ b/arch/x86/mm/pgtable.c
32406 @@ -91,10 +91,67 @@ static inline void pgd_list_del(pgd_t *pgd)
32407 list_del(&page->lru);
32408 }
32409
32410 -#define UNSHARED_PTRS_PER_PGD \
32411 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
32412 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32413 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
32414
32415 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
32416 +{
32417 + unsigned int count = USER_PGD_PTRS;
32418
32419 + if (!pax_user_shadow_base)
32420 + return;
32421 +
32422 + while (count--)
32423 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
32424 +}
32425 +#endif
32426 +
32427 +#ifdef CONFIG_PAX_PER_CPU_PGD
32428 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
32429 +{
32430 + unsigned int count = USER_PGD_PTRS;
32431 +
32432 + while (count--) {
32433 + pgd_t pgd;
32434 +
32435 +#ifdef CONFIG_X86_64
32436 + pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
32437 +#else
32438 + pgd = *src++;
32439 +#endif
32440 +
32441 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
32442 + pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
32443 +#endif
32444 +
32445 + *dst++ = pgd;
32446 + }
32447 +
32448 +}
32449 +#endif
32450 +
32451 +#ifdef CONFIG_X86_64
32452 +#define pxd_t pud_t
32453 +#define pyd_t pgd_t
32454 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
32455 +#define pxd_free(mm, pud) pud_free((mm), (pud))
32456 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
32457 +#define pyd_offset(mm, address) pgd_offset((mm), (address))
32458 +#define PYD_SIZE PGDIR_SIZE
32459 +#else
32460 +#define pxd_t pmd_t
32461 +#define pyd_t pud_t
32462 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
32463 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
32464 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
32465 +#define pyd_offset(mm, address) pud_offset((mm), (address))
32466 +#define PYD_SIZE PUD_SIZE
32467 +#endif
32468 +
32469 +#ifdef CONFIG_PAX_PER_CPU_PGD
32470 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
32471 +static inline void pgd_dtor(pgd_t *pgd) {}
32472 +#else
32473 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
32474 {
32475 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
32476 @@ -135,6 +192,7 @@ static void pgd_dtor(pgd_t *pgd)
32477 pgd_list_del(pgd);
32478 spin_unlock(&pgd_lock);
32479 }
32480 +#endif
32481
32482 /*
32483 * List of all pgd's needed for non-PAE so it can invalidate entries
32484 @@ -147,7 +205,7 @@ static void pgd_dtor(pgd_t *pgd)
32485 * -- nyc
32486 */
32487
32488 -#ifdef CONFIG_X86_PAE
32489 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
32490 /*
32491 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
32492 * updating the top-level pagetable entries to guarantee the
32493 @@ -159,7 +217,7 @@ static void pgd_dtor(pgd_t *pgd)
32494 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
32495 * and initialize the kernel pmds here.
32496 */
32497 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
32498 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
32499
32500 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
32501 {
32502 @@ -177,36 +235,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
32503 */
32504 flush_tlb_mm(mm);
32505 }
32506 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
32507 +#define PREALLOCATED_PXDS USER_PGD_PTRS
32508 #else /* !CONFIG_X86_PAE */
32509
32510 /* No need to prepopulate any pagetable entries in non-PAE modes. */
32511 -#define PREALLOCATED_PMDS 0
32512 +#define PREALLOCATED_PXDS 0
32513
32514 #endif /* CONFIG_X86_PAE */
32515
32516 -static void free_pmds(pmd_t *pmds[])
32517 +static void free_pxds(pxd_t *pxds[])
32518 {
32519 int i;
32520
32521 - for(i = 0; i < PREALLOCATED_PMDS; i++)
32522 - if (pmds[i])
32523 - free_page((unsigned long)pmds[i]);
32524 + for(i = 0; i < PREALLOCATED_PXDS; i++)
32525 + if (pxds[i])
32526 + free_page((unsigned long)pxds[i]);
32527 }
32528
32529 -static int preallocate_pmds(pmd_t *pmds[])
32530 +static int preallocate_pxds(pxd_t *pxds[])
32531 {
32532 int i;
32533 bool failed = false;
32534
32535 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
32536 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
32537 - if (pmd == NULL)
32538 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
32539 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
32540 + if (pxd == NULL)
32541 failed = true;
32542 - pmds[i] = pmd;
32543 + pxds[i] = pxd;
32544 }
32545
32546 if (failed) {
32547 - free_pmds(pmds);
32548 + free_pxds(pxds);
32549 return -ENOMEM;
32550 }
32551
32552 @@ -219,49 +279,52 @@ static int preallocate_pmds(pmd_t *pmds[])
32553 * preallocate which never got a corresponding vma will need to be
32554 * freed manually.
32555 */
32556 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
32557 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
32558 {
32559 int i;
32560
32561 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
32562 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
32563 pgd_t pgd = pgdp[i];
32564
32565 if (pgd_val(pgd) != 0) {
32566 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
32567 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
32568
32569 - pgdp[i] = native_make_pgd(0);
32570 + set_pgd(pgdp + i, native_make_pgd(0));
32571
32572 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
32573 - pmd_free(mm, pmd);
32574 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
32575 + pxd_free(mm, pxd);
32576 }
32577 }
32578 }
32579
32580 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
32581 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
32582 {
32583 - pud_t *pud;
32584 + pyd_t *pyd;
32585 int i;
32586
32587 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
32588 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
32589 return;
32590
32591 - pud = pud_offset(pgd, 0);
32592 -
32593 - for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
32594 - pmd_t *pmd = pmds[i];
32595 +#ifdef CONFIG_X86_64
32596 + pyd = pyd_offset(mm, 0L);
32597 +#else
32598 + pyd = pyd_offset(pgd, 0L);
32599 +#endif
32600
32601 + for (i = 0; i < PREALLOCATED_PXDS; i++, pyd++) {
32602 + pxd_t *pxd = pxds[i];
32603 if (i >= KERNEL_PGD_BOUNDARY)
32604 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
32605 - sizeof(pmd_t) * PTRS_PER_PMD);
32606 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
32607 + sizeof(pxd_t) * PTRS_PER_PMD);
32608
32609 - pud_populate(mm, pud, pmd);
32610 + pyd_populate(mm, pyd, pxd);
32611 }
32612 }
32613
32614 pgd_t *pgd_alloc(struct mm_struct *mm)
32615 {
32616 pgd_t *pgd;
32617 - pmd_t *pmds[PREALLOCATED_PMDS];
32618 + pxd_t *pxds[PREALLOCATED_PXDS];
32619
32620 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
32621
32622 @@ -270,11 +333,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
32623
32624 mm->pgd = pgd;
32625
32626 - if (preallocate_pmds(pmds) != 0)
32627 + if (preallocate_pxds(pxds) != 0)
32628 goto out_free_pgd;
32629
32630 if (paravirt_pgd_alloc(mm) != 0)
32631 - goto out_free_pmds;
32632 + goto out_free_pxds;
32633
32634 /*
32635 * Make sure that pre-populating the pmds is atomic with
32636 @@ -284,14 +347,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
32637 spin_lock(&pgd_lock);
32638
32639 pgd_ctor(mm, pgd);
32640 - pgd_prepopulate_pmd(mm, pgd, pmds);
32641 + pgd_prepopulate_pxd(mm, pgd, pxds);
32642
32643 spin_unlock(&pgd_lock);
32644
32645 return pgd;
32646
32647 -out_free_pmds:
32648 - free_pmds(pmds);
32649 +out_free_pxds:
32650 + free_pxds(pxds);
32651 out_free_pgd:
32652 free_page((unsigned long)pgd);
32653 out:
32654 @@ -300,7 +363,7 @@ out:
32655
32656 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
32657 {
32658 - pgd_mop_up_pmds(mm, pgd);
32659 + pgd_mop_up_pxds(mm, pgd);
32660 pgd_dtor(pgd);
32661 paravirt_pgd_free(mm, pgd);
32662 free_page((unsigned long)pgd);
32663 diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
32664 index a69bcb8..19068ab 100644
32665 --- a/arch/x86/mm/pgtable_32.c
32666 +++ b/arch/x86/mm/pgtable_32.c
32667 @@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
32668 return;
32669 }
32670 pte = pte_offset_kernel(pmd, vaddr);
32671 +
32672 + pax_open_kernel();
32673 if (pte_val(pteval))
32674 set_pte_at(&init_mm, vaddr, pte, pteval);
32675 else
32676 pte_clear(&init_mm, vaddr, pte);
32677 + pax_close_kernel();
32678
32679 /*
32680 * It's enough to flush this one mapping.
32681 diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
32682 index e666cbb..61788c45 100644
32683 --- a/arch/x86/mm/physaddr.c
32684 +++ b/arch/x86/mm/physaddr.c
32685 @@ -10,7 +10,7 @@
32686 #ifdef CONFIG_X86_64
32687
32688 #ifdef CONFIG_DEBUG_VIRTUAL
32689 -unsigned long __phys_addr(unsigned long x)
32690 +unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
32691 {
32692 unsigned long y = x - __START_KERNEL_map;
32693
32694 @@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
32695 #else
32696
32697 #ifdef CONFIG_DEBUG_VIRTUAL
32698 -unsigned long __phys_addr(unsigned long x)
32699 +unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
32700 {
32701 unsigned long phys_addr = x - PAGE_OFFSET;
32702 /* VMALLOC_* aren't constants */
32703 diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
32704 index 90555bf..f5f1828 100644
32705 --- a/arch/x86/mm/setup_nx.c
32706 +++ b/arch/x86/mm/setup_nx.c
32707 @@ -5,8 +5,10 @@
32708 #include <asm/pgtable.h>
32709 #include <asm/proto.h>
32710
32711 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
32712 static int disable_nx;
32713
32714 +#ifndef CONFIG_PAX_PAGEEXEC
32715 /*
32716 * noexec = on|off
32717 *
32718 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
32719 return 0;
32720 }
32721 early_param("noexec", noexec_setup);
32722 +#endif
32723 +
32724 +#endif
32725
32726 void x86_configure_nx(void)
32727 {
32728 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
32729 if (cpu_has_nx && !disable_nx)
32730 __supported_pte_mask |= _PAGE_NX;
32731 else
32732 +#endif
32733 __supported_pte_mask &= ~_PAGE_NX;
32734 }
32735
32736 diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
32737 index ae699b3..f1b2ad2 100644
32738 --- a/arch/x86/mm/tlb.c
32739 +++ b/arch/x86/mm/tlb.c
32740 @@ -48,7 +48,11 @@ void leave_mm(int cpu)
32741 BUG();
32742 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
32743 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
32744 +
32745 +#ifndef CONFIG_PAX_PER_CPU_PGD
32746 load_cr3(swapper_pg_dir);
32747 +#endif
32748 +
32749 }
32750 }
32751 EXPORT_SYMBOL_GPL(leave_mm);
32752 diff --git a/arch/x86/mm/uderef_64.c b/arch/x86/mm/uderef_64.c
32753 new file mode 100644
32754 index 0000000..dace51c
32755 --- /dev/null
32756 +++ b/arch/x86/mm/uderef_64.c
32757 @@ -0,0 +1,37 @@
32758 +#include <linux/mm.h>
32759 +#include <asm/pgtable.h>
32760 +#include <asm/uaccess.h>
32761 +
32762 +#ifdef CONFIG_PAX_MEMORY_UDEREF
32763 +/* PaX: due to the special call convention these functions must
32764 + * - remain leaf functions under all configurations,
32765 + * - never be called directly, only dereferenced from the wrappers.
32766 + */
32767 +void __pax_open_userland(void)
32768 +{
32769 + unsigned int cpu;
32770 +
32771 + if (unlikely(!segment_eq(get_fs(), USER_DS)))
32772 + return;
32773 +
32774 + cpu = raw_get_cpu();
32775 + BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_KERNEL);
32776 + write_cr3(__pa(get_cpu_pgd(cpu, user)) | PCID_USER | PCID_NOFLUSH);
32777 + raw_put_cpu_no_resched();
32778 +}
32779 +EXPORT_SYMBOL(__pax_open_userland);
32780 +
32781 +void __pax_close_userland(void)
32782 +{
32783 + unsigned int cpu;
32784 +
32785 + if (unlikely(!segment_eq(get_fs(), USER_DS)))
32786 + return;
32787 +
32788 + cpu = raw_get_cpu();
32789 + BUG_ON((read_cr3() & ~PAGE_MASK) != PCID_USER);
32790 + write_cr3(__pa(get_cpu_pgd(cpu, kernel)) | PCID_KERNEL | PCID_NOFLUSH);
32791 + raw_put_cpu_no_resched();
32792 +}
32793 +EXPORT_SYMBOL(__pax_close_userland);
32794 +#endif
32795 diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
32796 index 877b9a1..a8ecf42 100644
32797 --- a/arch/x86/net/bpf_jit.S
32798 +++ b/arch/x86/net/bpf_jit.S
32799 @@ -9,6 +9,7 @@
32800 */
32801 #include <linux/linkage.h>
32802 #include <asm/dwarf2.h>
32803 +#include <asm/alternative-asm.h>
32804
32805 /*
32806 * Calling convention :
32807 @@ -35,6 +36,7 @@ sk_load_word_positive_offset:
32808 jle bpf_slow_path_word
32809 mov (SKBDATA,%rsi),%eax
32810 bswap %eax /* ntohl() */
32811 + pax_force_retaddr
32812 ret
32813
32814 sk_load_half:
32815 @@ -52,6 +54,7 @@ sk_load_half_positive_offset:
32816 jle bpf_slow_path_half
32817 movzwl (SKBDATA,%rsi),%eax
32818 rol $8,%ax # ntohs()
32819 + pax_force_retaddr
32820 ret
32821
32822 sk_load_byte:
32823 @@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
32824 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
32825 jle bpf_slow_path_byte
32826 movzbl (SKBDATA,%rsi),%eax
32827 + pax_force_retaddr
32828 ret
32829
32830 /**
32831 @@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
32832 movzbl (SKBDATA,%rsi),%ebx
32833 and $15,%bl
32834 shl $2,%bl
32835 + pax_force_retaddr
32836 ret
32837
32838 /* rsi contains offset and can be scratched */
32839 @@ -109,6 +114,7 @@ bpf_slow_path_word:
32840 js bpf_error
32841 mov -12(%rbp),%eax
32842 bswap %eax
32843 + pax_force_retaddr
32844 ret
32845
32846 bpf_slow_path_half:
32847 @@ -117,12 +123,14 @@ bpf_slow_path_half:
32848 mov -12(%rbp),%ax
32849 rol $8,%ax
32850 movzwl %ax,%eax
32851 + pax_force_retaddr
32852 ret
32853
32854 bpf_slow_path_byte:
32855 bpf_slow_path_common(1)
32856 js bpf_error
32857 movzbl -12(%rbp),%eax
32858 + pax_force_retaddr
32859 ret
32860
32861 bpf_slow_path_byte_msh:
32862 @@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
32863 and $15,%al
32864 shl $2,%al
32865 xchg %eax,%ebx
32866 + pax_force_retaddr
32867 ret
32868
32869 #define sk_negative_common(SIZE) \
32870 @@ -157,6 +166,7 @@ sk_load_word_negative_offset:
32871 sk_negative_common(4)
32872 mov (%rax), %eax
32873 bswap %eax
32874 + pax_force_retaddr
32875 ret
32876
32877 bpf_slow_path_half_neg:
32878 @@ -168,6 +178,7 @@ sk_load_half_negative_offset:
32879 mov (%rax),%ax
32880 rol $8,%ax
32881 movzwl %ax,%eax
32882 + pax_force_retaddr
32883 ret
32884
32885 bpf_slow_path_byte_neg:
32886 @@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
32887 .globl sk_load_byte_negative_offset
32888 sk_negative_common(1)
32889 movzbl (%rax), %eax
32890 + pax_force_retaddr
32891 ret
32892
32893 bpf_slow_path_byte_msh_neg:
32894 @@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
32895 and $15,%al
32896 shl $2,%al
32897 xchg %eax,%ebx
32898 + pax_force_retaddr
32899 ret
32900
32901 bpf_error:
32902 @@ -197,4 +210,5 @@ bpf_error:
32903 xor %eax,%eax
32904 mov -8(%rbp),%rbx
32905 leaveq
32906 + pax_force_retaddr
32907 ret
32908 diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
32909 index 26328e8..5f96c25 100644
32910 --- a/arch/x86/net/bpf_jit_comp.c
32911 +++ b/arch/x86/net/bpf_jit_comp.c
32912 @@ -50,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
32913 return ptr + len;
32914 }
32915
32916 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32917 +#define MAX_INSTR_CODE_SIZE 96
32918 +#else
32919 +#define MAX_INSTR_CODE_SIZE 64
32920 +#endif
32921 +
32922 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
32923
32924 #define EMIT1(b1) EMIT(b1, 1)
32925 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
32926 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
32927 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
32928 +
32929 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
32930 +/* original constant will appear in ecx */
32931 +#define DILUTE_CONST_SEQUENCE(_off, _key) \
32932 +do { \
32933 + /* mov ecx, randkey */ \
32934 + EMIT1(0xb9); \
32935 + EMIT(_key, 4); \
32936 + /* xor ecx, randkey ^ off */ \
32937 + EMIT2(0x81, 0xf1); \
32938 + EMIT((_key) ^ (_off), 4); \
32939 +} while (0)
32940 +
32941 +#define EMIT1_off32(b1, _off) \
32942 +do { \
32943 + switch (b1) { \
32944 + case 0x05: /* add eax, imm32 */ \
32945 + case 0x2d: /* sub eax, imm32 */ \
32946 + case 0x25: /* and eax, imm32 */ \
32947 + case 0x0d: /* or eax, imm32 */ \
32948 + case 0xb8: /* mov eax, imm32 */ \
32949 + case 0x35: /* xor eax, imm32 */ \
32950 + case 0x3d: /* cmp eax, imm32 */ \
32951 + case 0xa9: /* test eax, imm32 */ \
32952 + DILUTE_CONST_SEQUENCE(_off, randkey); \
32953 + EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
32954 + break; \
32955 + case 0xbb: /* mov ebx, imm32 */ \
32956 + DILUTE_CONST_SEQUENCE(_off, randkey); \
32957 + /* mov ebx, ecx */ \
32958 + EMIT2(0x89, 0xcb); \
32959 + break; \
32960 + case 0xbe: /* mov esi, imm32 */ \
32961 + DILUTE_CONST_SEQUENCE(_off, randkey); \
32962 + /* mov esi, ecx */ \
32963 + EMIT2(0x89, 0xce); \
32964 + break; \
32965 + case 0xe8: /* call rel imm32, always to known funcs */ \
32966 + EMIT1(b1); \
32967 + EMIT(_off, 4); \
32968 + break; \
32969 + case 0xe9: /* jmp rel imm32 */ \
32970 + EMIT1(b1); \
32971 + EMIT(_off, 4); \
32972 + /* prevent fall-through, we're not called if off = 0 */ \
32973 + EMIT(0xcccccccc, 4); \
32974 + EMIT(0xcccccccc, 4); \
32975 + break; \
32976 + default: \
32977 + BUILD_BUG(); \
32978 + } \
32979 +} while (0)
32980 +
32981 +#define EMIT2_off32(b1, b2, _off) \
32982 +do { \
32983 + if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
32984 + EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
32985 + EMIT(randkey, 4); \
32986 + EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
32987 + EMIT((_off) - randkey, 4); \
32988 + } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
32989 + DILUTE_CONST_SEQUENCE(_off, randkey); \
32990 + /* imul eax, ecx */ \
32991 + EMIT3(0x0f, 0xaf, 0xc1); \
32992 + } else { \
32993 + BUILD_BUG(); \
32994 + } \
32995 +} while (0)
32996 +#else
32997 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
32998 +#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
32999 +#endif
33000
33001 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
33002 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
33003 @@ -91,6 +168,24 @@ do { \
33004 #define X86_JBE 0x76
33005 #define X86_JA 0x77
33006
33007 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33008 +#define APPEND_FLOW_VERIFY() \
33009 +do { \
33010 + /* mov ecx, randkey */ \
33011 + EMIT1(0xb9); \
33012 + EMIT(randkey, 4); \
33013 + /* cmp ecx, randkey */ \
33014 + EMIT2(0x81, 0xf9); \
33015 + EMIT(randkey, 4); \
33016 + /* jz after 8 int 3s */ \
33017 + EMIT2(0x74, 0x08); \
33018 + EMIT(0xcccccccc, 4); \
33019 + EMIT(0xcccccccc, 4); \
33020 +} while (0)
33021 +#else
33022 +#define APPEND_FLOW_VERIFY() do { } while (0)
33023 +#endif
33024 +
33025 #define EMIT_COND_JMP(op, offset) \
33026 do { \
33027 if (is_near(offset)) \
33028 @@ -98,6 +193,7 @@ do { \
33029 else { \
33030 EMIT2(0x0f, op + 0x10); \
33031 EMIT(offset, 4); /* jxx .+off32 */ \
33032 + APPEND_FLOW_VERIFY(); \
33033 } \
33034 } while (0)
33035
33036 @@ -145,55 +241,54 @@ static int pkt_type_offset(void)
33037 return -1;
33038 }
33039
33040 -struct bpf_binary_header {
33041 - unsigned int pages;
33042 - /* Note : for security reasons, bpf code will follow a randomly
33043 - * sized amount of int3 instructions
33044 - */
33045 - u8 image[];
33046 -};
33047 -
33048 -static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen,
33049 +/* Note : for security reasons, bpf code will follow a randomly
33050 + * sized amount of int3 instructions
33051 + */
33052 +static u8 *bpf_alloc_binary(unsigned int proglen,
33053 u8 **image_ptr)
33054 {
33055 unsigned int sz, hole;
33056 - struct bpf_binary_header *header;
33057 + u8 *header;
33058
33059 /* Most of BPF filters are really small,
33060 * but if some of them fill a page, allow at least
33061 * 128 extra bytes to insert a random section of int3
33062 */
33063 - sz = round_up(proglen + sizeof(*header) + 128, PAGE_SIZE);
33064 - header = module_alloc(sz);
33065 + sz = round_up(proglen + 128, PAGE_SIZE);
33066 + header = module_alloc_exec(sz);
33067 if (!header)
33068 return NULL;
33069
33070 + pax_open_kernel();
33071 memset(header, 0xcc, sz); /* fill whole space with int3 instructions */
33072 + pax_close_kernel();
33073
33074 - header->pages = sz / PAGE_SIZE;
33075 - hole = sz - (proglen + sizeof(*header));
33076 + hole = PAGE_SIZE - (proglen & ~PAGE_MASK);
33077
33078 /* insert a random number of int3 instructions before BPF code */
33079 - *image_ptr = &header->image[prandom_u32() % hole];
33080 + *image_ptr = &header[prandom_u32() % hole];
33081 return header;
33082 }
33083
33084 void bpf_jit_compile(struct sk_filter *fp)
33085 {
33086 - u8 temp[64];
33087 + u8 temp[MAX_INSTR_CODE_SIZE];
33088 u8 *prog;
33089 unsigned int proglen, oldproglen = 0;
33090 int ilen, i;
33091 int t_offset, f_offset;
33092 u8 t_op, f_op, seen = 0, pass;
33093 u8 *image = NULL;
33094 - struct bpf_binary_header *header = NULL;
33095 + u8 *header = NULL;
33096 u8 *func;
33097 int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
33098 unsigned int cleanup_addr; /* epilogue code offset */
33099 unsigned int *addrs;
33100 const struct sock_filter *filter = fp->insns;
33101 int flen = fp->len;
33102 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33103 + unsigned int randkey;
33104 +#endif
33105
33106 if (!bpf_jit_enable)
33107 return;
33108 @@ -202,11 +297,15 @@ void bpf_jit_compile(struct sk_filter *fp)
33109 if (addrs == NULL)
33110 return;
33111
33112 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33113 + randkey = get_random_int();
33114 +#endif
33115 +
33116 /* Before first pass, make a rough estimation of addrs[]
33117 - * each bpf instruction is translated to less than 64 bytes
33118 + * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
33119 */
33120 for (proglen = 0, i = 0; i < flen; i++) {
33121 - proglen += 64;
33122 + proglen += MAX_INSTR_CODE_SIZE;
33123 addrs[i] = proglen;
33124 }
33125 cleanup_addr = proglen; /* epilogue address */
33126 @@ -317,10 +416,8 @@ void bpf_jit_compile(struct sk_filter *fp)
33127 case BPF_S_ALU_MUL_K: /* A *= K */
33128 if (is_imm8(K))
33129 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
33130 - else {
33131 - EMIT2(0x69, 0xc0); /* imul imm32,%eax */
33132 - EMIT(K, 4);
33133 - }
33134 + else
33135 + EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
33136 break;
33137 case BPF_S_ALU_DIV_X: /* A /= X; */
33138 seen |= SEEN_XREG;
33139 @@ -360,13 +457,23 @@ void bpf_jit_compile(struct sk_filter *fp)
33140 break;
33141 case BPF_S_ALU_MOD_K: /* A %= K; */
33142 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
33143 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33144 + DILUTE_CONST_SEQUENCE(K, randkey);
33145 +#else
33146 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
33147 +#endif
33148 EMIT2(0xf7, 0xf1); /* div %ecx */
33149 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
33150 break;
33151 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
33152 +#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
33153 + DILUTE_CONST_SEQUENCE(K, randkey);
33154 + // imul rax, rcx
33155 + EMIT4(0x48, 0x0f, 0xaf, 0xc1);
33156 +#else
33157 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
33158 EMIT(K, 4);
33159 +#endif
33160 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
33161 break;
33162 case BPF_S_ALU_AND_X:
33163 @@ -637,8 +744,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
33164 if (is_imm8(K)) {
33165 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
33166 } else {
33167 - EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
33168 - EMIT(K, 4);
33169 + EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
33170 }
33171 } else {
33172 EMIT2(0x89,0xde); /* mov %ebx,%esi */
33173 @@ -728,10 +834,12 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
33174 if (unlikely(proglen + ilen > oldproglen)) {
33175 pr_err("bpb_jit_compile fatal error\n");
33176 kfree(addrs);
33177 - module_free(NULL, header);
33178 + module_free_exec(NULL, image);
33179 return;
33180 }
33181 + pax_open_kernel();
33182 memcpy(image + proglen, temp, ilen);
33183 + pax_close_kernel();
33184 }
33185 proglen += ilen;
33186 addrs[i] = proglen;
33187 @@ -764,7 +872,6 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
33188
33189 if (image) {
33190 bpf_flush_icache(header, image + proglen);
33191 - set_memory_ro((unsigned long)header, header->pages);
33192 fp->bpf_func = (void *)image;
33193 }
33194 out:
33195 @@ -776,10 +883,9 @@ static void bpf_jit_free_deferred(struct work_struct *work)
33196 {
33197 struct sk_filter *fp = container_of(work, struct sk_filter, work);
33198 unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
33199 - struct bpf_binary_header *header = (void *)addr;
33200
33201 - set_memory_rw(addr, header->pages);
33202 - module_free(NULL, header);
33203 + set_memory_rw(addr, 1);
33204 + module_free_exec(NULL, (void *)addr);
33205 kfree(fp);
33206 }
33207
33208 diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
33209 index d6aa6e8..266395a 100644
33210 --- a/arch/x86/oprofile/backtrace.c
33211 +++ b/arch/x86/oprofile/backtrace.c
33212 @@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
33213 struct stack_frame_ia32 *fp;
33214 unsigned long bytes;
33215
33216 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
33217 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
33218 if (bytes != sizeof(bufhead))
33219 return NULL;
33220
33221 - fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
33222 + fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
33223
33224 oprofile_add_trace(bufhead[0].return_address);
33225
33226 @@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
33227 struct stack_frame bufhead[2];
33228 unsigned long bytes;
33229
33230 - bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
33231 + bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
33232 if (bytes != sizeof(bufhead))
33233 return NULL;
33234
33235 @@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
33236 {
33237 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
33238
33239 - if (!user_mode_vm(regs)) {
33240 + if (!user_mode(regs)) {
33241 unsigned long stack = kernel_stack_pointer(regs);
33242 if (depth)
33243 dump_trace(NULL, regs, (unsigned long *)stack, 0,
33244 diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
33245 index 6890d84..1dad1f1 100644
33246 --- a/arch/x86/oprofile/nmi_int.c
33247 +++ b/arch/x86/oprofile/nmi_int.c
33248 @@ -23,6 +23,7 @@
33249 #include <asm/nmi.h>
33250 #include <asm/msr.h>
33251 #include <asm/apic.h>
33252 +#include <asm/pgtable.h>
33253
33254 #include "op_counter.h"
33255 #include "op_x86_model.h"
33256 @@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
33257 if (ret)
33258 return ret;
33259
33260 - if (!model->num_virt_counters)
33261 - model->num_virt_counters = model->num_counters;
33262 + if (!model->num_virt_counters) {
33263 + pax_open_kernel();
33264 + *(unsigned int *)&model->num_virt_counters = model->num_counters;
33265 + pax_close_kernel();
33266 + }
33267
33268 mux_init(ops);
33269
33270 diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
33271 index 50d86c0..7985318 100644
33272 --- a/arch/x86/oprofile/op_model_amd.c
33273 +++ b/arch/x86/oprofile/op_model_amd.c
33274 @@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
33275 num_counters = AMD64_NUM_COUNTERS;
33276 }
33277
33278 - op_amd_spec.num_counters = num_counters;
33279 - op_amd_spec.num_controls = num_counters;
33280 - op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
33281 + pax_open_kernel();
33282 + *(unsigned int *)&op_amd_spec.num_counters = num_counters;
33283 + *(unsigned int *)&op_amd_spec.num_controls = num_counters;
33284 + *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
33285 + pax_close_kernel();
33286
33287 return 0;
33288 }
33289 diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
33290 index d90528e..0127e2b 100644
33291 --- a/arch/x86/oprofile/op_model_ppro.c
33292 +++ b/arch/x86/oprofile/op_model_ppro.c
33293 @@ -19,6 +19,7 @@
33294 #include <asm/msr.h>
33295 #include <asm/apic.h>
33296 #include <asm/nmi.h>
33297 +#include <asm/pgtable.h>
33298
33299 #include "op_x86_model.h"
33300 #include "op_counter.h"
33301 @@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
33302
33303 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
33304
33305 - op_arch_perfmon_spec.num_counters = num_counters;
33306 - op_arch_perfmon_spec.num_controls = num_counters;
33307 + pax_open_kernel();
33308 + *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
33309 + *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
33310 + pax_close_kernel();
33311 }
33312
33313 static int arch_perfmon_init(struct oprofile_operations *ignore)
33314 diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
33315 index 71e8a67..6a313bb 100644
33316 --- a/arch/x86/oprofile/op_x86_model.h
33317 +++ b/arch/x86/oprofile/op_x86_model.h
33318 @@ -52,7 +52,7 @@ struct op_x86_model_spec {
33319 void (*switch_ctrl)(struct op_x86_model_spec const *model,
33320 struct op_msrs const * const msrs);
33321 #endif
33322 -};
33323 +} __do_const;
33324
33325 struct op_counter_config;
33326
33327 diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
33328 index 372e9b8..e775a6c 100644
33329 --- a/arch/x86/pci/irq.c
33330 +++ b/arch/x86/pci/irq.c
33331 @@ -50,7 +50,7 @@ struct irq_router {
33332 struct irq_router_handler {
33333 u16 vendor;
33334 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
33335 -};
33336 +} __do_const;
33337
33338 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
33339 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
33340 @@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
33341 return 0;
33342 }
33343
33344 -static __initdata struct irq_router_handler pirq_routers[] = {
33345 +static __initconst const struct irq_router_handler pirq_routers[] = {
33346 { PCI_VENDOR_ID_INTEL, intel_router_probe },
33347 { PCI_VENDOR_ID_AL, ali_router_probe },
33348 { PCI_VENDOR_ID_ITE, ite_router_probe },
33349 @@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
33350 static void __init pirq_find_router(struct irq_router *r)
33351 {
33352 struct irq_routing_table *rt = pirq_table;
33353 - struct irq_router_handler *h;
33354 + const struct irq_router_handler *h;
33355
33356 #ifdef CONFIG_PCI_BIOS
33357 if (!rt->signature) {
33358 @@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
33359 return 0;
33360 }
33361
33362 -static struct dmi_system_id __initdata pciirq_dmi_table[] = {
33363 +static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
33364 {
33365 .callback = fix_broken_hp_bios_irq9,
33366 .ident = "HP Pavilion N5400 Series Laptop",
33367 diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
33368 index 903fded..94b0d88 100644
33369 --- a/arch/x86/pci/mrst.c
33370 +++ b/arch/x86/pci/mrst.c
33371 @@ -241,7 +241,9 @@ int __init pci_mrst_init(void)
33372 pr_info("Intel MID platform detected, using MID PCI ops\n");
33373 pci_mmcfg_late_init();
33374 pcibios_enable_irq = mrst_pci_irq_enable;
33375 - pci_root_ops = pci_mrst_ops;
33376 + pax_open_kernel();
33377 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
33378 + pax_close_kernel();
33379 pci_soc_mode = 1;
33380 /* Continue with standard init */
33381 return 1;
33382 diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
33383 index c77b24a..c979855 100644
33384 --- a/arch/x86/pci/pcbios.c
33385 +++ b/arch/x86/pci/pcbios.c
33386 @@ -79,7 +79,7 @@ union bios32 {
33387 static struct {
33388 unsigned long address;
33389 unsigned short segment;
33390 -} bios32_indirect = { 0, __KERNEL_CS };
33391 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
33392
33393 /*
33394 * Returns the entry point for the given service, NULL on error
33395 @@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
33396 unsigned long length; /* %ecx */
33397 unsigned long entry; /* %edx */
33398 unsigned long flags;
33399 + struct desc_struct d, *gdt;
33400
33401 local_irq_save(flags);
33402 - __asm__("lcall *(%%edi); cld"
33403 +
33404 + gdt = get_cpu_gdt_table(smp_processor_id());
33405 +
33406 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
33407 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
33408 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
33409 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
33410 +
33411 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
33412 : "=a" (return_code),
33413 "=b" (address),
33414 "=c" (length),
33415 "=d" (entry)
33416 : "0" (service),
33417 "1" (0),
33418 - "D" (&bios32_indirect));
33419 + "D" (&bios32_indirect),
33420 + "r"(__PCIBIOS_DS)
33421 + : "memory");
33422 +
33423 + pax_open_kernel();
33424 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
33425 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
33426 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
33427 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
33428 + pax_close_kernel();
33429 +
33430 local_irq_restore(flags);
33431
33432 switch (return_code) {
33433 - case 0:
33434 - return address + entry;
33435 - case 0x80: /* Not present */
33436 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
33437 - return 0;
33438 - default: /* Shouldn't happen */
33439 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
33440 - service, return_code);
33441 + case 0: {
33442 + int cpu;
33443 + unsigned char flags;
33444 +
33445 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
33446 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
33447 + printk(KERN_WARNING "bios32_service: not valid\n");
33448 return 0;
33449 + }
33450 + address = address + PAGE_OFFSET;
33451 + length += 16UL; /* some BIOSs underreport this... */
33452 + flags = 4;
33453 + if (length >= 64*1024*1024) {
33454 + length >>= PAGE_SHIFT;
33455 + flags |= 8;
33456 + }
33457 +
33458 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
33459 + gdt = get_cpu_gdt_table(cpu);
33460 + pack_descriptor(&d, address, length, 0x9b, flags);
33461 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
33462 + pack_descriptor(&d, address, length, 0x93, flags);
33463 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
33464 + }
33465 + return entry;
33466 + }
33467 + case 0x80: /* Not present */
33468 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
33469 + return 0;
33470 + default: /* Shouldn't happen */
33471 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
33472 + service, return_code);
33473 + return 0;
33474 }
33475 }
33476
33477 static struct {
33478 unsigned long address;
33479 unsigned short segment;
33480 -} pci_indirect = { 0, __KERNEL_CS };
33481 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
33482
33483 -static int pci_bios_present;
33484 +static int pci_bios_present __read_only;
33485
33486 static int check_pcibios(void)
33487 {
33488 @@ -131,11 +174,13 @@ static int check_pcibios(void)
33489 unsigned long flags, pcibios_entry;
33490
33491 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
33492 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
33493 + pci_indirect.address = pcibios_entry;
33494
33495 local_irq_save(flags);
33496 - __asm__(
33497 - "lcall *(%%edi); cld\n\t"
33498 + __asm__("movw %w6, %%ds\n\t"
33499 + "lcall *%%ss:(%%edi); cld\n\t"
33500 + "push %%ss\n\t"
33501 + "pop %%ds\n\t"
33502 "jc 1f\n\t"
33503 "xor %%ah, %%ah\n"
33504 "1:"
33505 @@ -144,7 +189,8 @@ static int check_pcibios(void)
33506 "=b" (ebx),
33507 "=c" (ecx)
33508 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
33509 - "D" (&pci_indirect)
33510 + "D" (&pci_indirect),
33511 + "r" (__PCIBIOS_DS)
33512 : "memory");
33513 local_irq_restore(flags);
33514
33515 @@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33516
33517 switch (len) {
33518 case 1:
33519 - __asm__("lcall *(%%esi); cld\n\t"
33520 + __asm__("movw %w6, %%ds\n\t"
33521 + "lcall *%%ss:(%%esi); cld\n\t"
33522 + "push %%ss\n\t"
33523 + "pop %%ds\n\t"
33524 "jc 1f\n\t"
33525 "xor %%ah, %%ah\n"
33526 "1:"
33527 @@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33528 : "1" (PCIBIOS_READ_CONFIG_BYTE),
33529 "b" (bx),
33530 "D" ((long)reg),
33531 - "S" (&pci_indirect));
33532 + "S" (&pci_indirect),
33533 + "r" (__PCIBIOS_DS));
33534 /*
33535 * Zero-extend the result beyond 8 bits, do not trust the
33536 * BIOS having done it:
33537 @@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33538 *value &= 0xff;
33539 break;
33540 case 2:
33541 - __asm__("lcall *(%%esi); cld\n\t"
33542 + __asm__("movw %w6, %%ds\n\t"
33543 + "lcall *%%ss:(%%esi); cld\n\t"
33544 + "push %%ss\n\t"
33545 + "pop %%ds\n\t"
33546 "jc 1f\n\t"
33547 "xor %%ah, %%ah\n"
33548 "1:"
33549 @@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33550 : "1" (PCIBIOS_READ_CONFIG_WORD),
33551 "b" (bx),
33552 "D" ((long)reg),
33553 - "S" (&pci_indirect));
33554 + "S" (&pci_indirect),
33555 + "r" (__PCIBIOS_DS));
33556 /*
33557 * Zero-extend the result beyond 16 bits, do not trust the
33558 * BIOS having done it:
33559 @@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33560 *value &= 0xffff;
33561 break;
33562 case 4:
33563 - __asm__("lcall *(%%esi); cld\n\t"
33564 + __asm__("movw %w6, %%ds\n\t"
33565 + "lcall *%%ss:(%%esi); cld\n\t"
33566 + "push %%ss\n\t"
33567 + "pop %%ds\n\t"
33568 "jc 1f\n\t"
33569 "xor %%ah, %%ah\n"
33570 "1:"
33571 @@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
33572 : "1" (PCIBIOS_READ_CONFIG_DWORD),
33573 "b" (bx),
33574 "D" ((long)reg),
33575 - "S" (&pci_indirect));
33576 + "S" (&pci_indirect),
33577 + "r" (__PCIBIOS_DS));
33578 break;
33579 }
33580
33581 @@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33582
33583 switch (len) {
33584 case 1:
33585 - __asm__("lcall *(%%esi); cld\n\t"
33586 + __asm__("movw %w6, %%ds\n\t"
33587 + "lcall *%%ss:(%%esi); cld\n\t"
33588 + "push %%ss\n\t"
33589 + "pop %%ds\n\t"
33590 "jc 1f\n\t"
33591 "xor %%ah, %%ah\n"
33592 "1:"
33593 @@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33594 "c" (value),
33595 "b" (bx),
33596 "D" ((long)reg),
33597 - "S" (&pci_indirect));
33598 + "S" (&pci_indirect),
33599 + "r" (__PCIBIOS_DS));
33600 break;
33601 case 2:
33602 - __asm__("lcall *(%%esi); cld\n\t"
33603 + __asm__("movw %w6, %%ds\n\t"
33604 + "lcall *%%ss:(%%esi); cld\n\t"
33605 + "push %%ss\n\t"
33606 + "pop %%ds\n\t"
33607 "jc 1f\n\t"
33608 "xor %%ah, %%ah\n"
33609 "1:"
33610 @@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33611 "c" (value),
33612 "b" (bx),
33613 "D" ((long)reg),
33614 - "S" (&pci_indirect));
33615 + "S" (&pci_indirect),
33616 + "r" (__PCIBIOS_DS));
33617 break;
33618 case 4:
33619 - __asm__("lcall *(%%esi); cld\n\t"
33620 + __asm__("movw %w6, %%ds\n\t"
33621 + "lcall *%%ss:(%%esi); cld\n\t"
33622 + "push %%ss\n\t"
33623 + "pop %%ds\n\t"
33624 "jc 1f\n\t"
33625 "xor %%ah, %%ah\n"
33626 "1:"
33627 @@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
33628 "c" (value),
33629 "b" (bx),
33630 "D" ((long)reg),
33631 - "S" (&pci_indirect));
33632 + "S" (&pci_indirect),
33633 + "r" (__PCIBIOS_DS));
33634 break;
33635 }
33636
33637 @@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
33638
33639 DBG("PCI: Fetching IRQ routing table... ");
33640 __asm__("push %%es\n\t"
33641 + "movw %w8, %%ds\n\t"
33642 "push %%ds\n\t"
33643 "pop %%es\n\t"
33644 - "lcall *(%%esi); cld\n\t"
33645 + "lcall *%%ss:(%%esi); cld\n\t"
33646 "pop %%es\n\t"
33647 + "push %%ss\n\t"
33648 + "pop %%ds\n"
33649 "jc 1f\n\t"
33650 "xor %%ah, %%ah\n"
33651 "1:"
33652 @@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
33653 "1" (0),
33654 "D" ((long) &opt),
33655 "S" (&pci_indirect),
33656 - "m" (opt)
33657 + "m" (opt),
33658 + "r" (__PCIBIOS_DS)
33659 : "memory");
33660 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
33661 if (ret & 0xff00)
33662 @@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
33663 {
33664 int ret;
33665
33666 - __asm__("lcall *(%%esi); cld\n\t"
33667 + __asm__("movw %w5, %%ds\n\t"
33668 + "lcall *%%ss:(%%esi); cld\n\t"
33669 + "push %%ss\n\t"
33670 + "pop %%ds\n"
33671 "jc 1f\n\t"
33672 "xor %%ah, %%ah\n"
33673 "1:"
33674 @@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
33675 : "0" (PCIBIOS_SET_PCI_HW_INT),
33676 "b" ((dev->bus->number << 8) | dev->devfn),
33677 "c" ((irq << 8) | (pin + 10)),
33678 - "S" (&pci_indirect));
33679 + "S" (&pci_indirect),
33680 + "r" (__PCIBIOS_DS));
33681 return !(ret & 0xff00);
33682 }
33683 EXPORT_SYMBOL(pcibios_set_irq_routing);
33684 diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
33685 index 40e4469..d915bf9 100644
33686 --- a/arch/x86/platform/efi/efi_32.c
33687 +++ b/arch/x86/platform/efi/efi_32.c
33688 @@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
33689 {
33690 struct desc_ptr gdt_descr;
33691
33692 +#ifdef CONFIG_PAX_KERNEXEC
33693 + struct desc_struct d;
33694 +#endif
33695 +
33696 local_irq_save(efi_rt_eflags);
33697
33698 load_cr3(initial_page_table);
33699 __flush_tlb_all();
33700
33701 +#ifdef CONFIG_PAX_KERNEXEC
33702 + pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
33703 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
33704 + pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
33705 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
33706 +#endif
33707 +
33708 gdt_descr.address = __pa(get_cpu_gdt_table(0));
33709 gdt_descr.size = GDT_SIZE - 1;
33710 load_gdt(&gdt_descr);
33711 @@ -58,11 +69,24 @@ void efi_call_phys_epilog(void)
33712 {
33713 struct desc_ptr gdt_descr;
33714
33715 +#ifdef CONFIG_PAX_KERNEXEC
33716 + struct desc_struct d;
33717 +
33718 + memset(&d, 0, sizeof d);
33719 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
33720 + write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
33721 +#endif
33722 +
33723 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
33724 gdt_descr.size = GDT_SIZE - 1;
33725 load_gdt(&gdt_descr);
33726
33727 +#ifdef CONFIG_PAX_PER_CPU_PGD
33728 + load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
33729 +#else
33730 load_cr3(swapper_pg_dir);
33731 +#endif
33732 +
33733 __flush_tlb_all();
33734
33735 local_irq_restore(efi_rt_eflags);
33736 diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
33737 index 39a0e7f1..872396e 100644
33738 --- a/arch/x86/platform/efi/efi_64.c
33739 +++ b/arch/x86/platform/efi/efi_64.c
33740 @@ -76,6 +76,11 @@ void __init efi_call_phys_prelog(void)
33741 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
33742 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
33743 }
33744 +
33745 +#ifdef CONFIG_PAX_PER_CPU_PGD
33746 + load_cr3(swapper_pg_dir);
33747 +#endif
33748 +
33749 __flush_tlb_all();
33750 }
33751
33752 @@ -89,6 +94,11 @@ void __init efi_call_phys_epilog(void)
33753 for (pgd = 0; pgd < n_pgds; pgd++)
33754 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
33755 kfree(save_pgd);
33756 +
33757 +#ifdef CONFIG_PAX_PER_CPU_PGD
33758 + load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
33759 +#endif
33760 +
33761 __flush_tlb_all();
33762 local_irq_restore(efi_flags);
33763 early_code_mapping_set_exec(0);
33764 diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
33765 index fbe66e6..eae5e38 100644
33766 --- a/arch/x86/platform/efi/efi_stub_32.S
33767 +++ b/arch/x86/platform/efi/efi_stub_32.S
33768 @@ -6,7 +6,9 @@
33769 */
33770
33771 #include <linux/linkage.h>
33772 +#include <linux/init.h>
33773 #include <asm/page_types.h>
33774 +#include <asm/segment.h>
33775
33776 /*
33777 * efi_call_phys(void *, ...) is a function with variable parameters.
33778 @@ -20,7 +22,7 @@
33779 * service functions will comply with gcc calling convention, too.
33780 */
33781
33782 -.text
33783 +__INIT
33784 ENTRY(efi_call_phys)
33785 /*
33786 * 0. The function can only be called in Linux kernel. So CS has been
33787 @@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
33788 * The mapping of lower virtual memory has been created in prelog and
33789 * epilog.
33790 */
33791 - movl $1f, %edx
33792 - subl $__PAGE_OFFSET, %edx
33793 - jmp *%edx
33794 +#ifdef CONFIG_PAX_KERNEXEC
33795 + movl $(__KERNEXEC_EFI_DS), %edx
33796 + mov %edx, %ds
33797 + mov %edx, %es
33798 + mov %edx, %ss
33799 + addl $2f,(1f)
33800 + ljmp *(1f)
33801 +
33802 +__INITDATA
33803 +1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
33804 +.previous
33805 +
33806 +2:
33807 + subl $2b,(1b)
33808 +#else
33809 + jmp 1f-__PAGE_OFFSET
33810 1:
33811 +#endif
33812
33813 /*
33814 * 2. Now on the top of stack is the return
33815 @@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
33816 * parameter 2, ..., param n. To make things easy, we save the return
33817 * address of efi_call_phys in a global variable.
33818 */
33819 - popl %edx
33820 - movl %edx, saved_return_addr
33821 - /* get the function pointer into ECX*/
33822 - popl %ecx
33823 - movl %ecx, efi_rt_function_ptr
33824 - movl $2f, %edx
33825 - subl $__PAGE_OFFSET, %edx
33826 - pushl %edx
33827 + popl (saved_return_addr)
33828 + popl (efi_rt_function_ptr)
33829
33830 /*
33831 * 3. Clear PG bit in %CR0.
33832 @@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
33833 /*
33834 * 5. Call the physical function.
33835 */
33836 - jmp *%ecx
33837 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
33838
33839 -2:
33840 /*
33841 * 6. After EFI runtime service returns, control will return to
33842 * following instruction. We'd better readjust stack pointer first.
33843 @@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
33844 movl %cr0, %edx
33845 orl $0x80000000, %edx
33846 movl %edx, %cr0
33847 - jmp 1f
33848 -1:
33849 +
33850 /*
33851 * 8. Now restore the virtual mode from flat mode by
33852 * adding EIP with PAGE_OFFSET.
33853 */
33854 - movl $1f, %edx
33855 - jmp *%edx
33856 +#ifdef CONFIG_PAX_KERNEXEC
33857 + movl $(__KERNEL_DS), %edx
33858 + mov %edx, %ds
33859 + mov %edx, %es
33860 + mov %edx, %ss
33861 + ljmp $(__KERNEL_CS),$1f
33862 +#else
33863 + jmp 1f+__PAGE_OFFSET
33864 +#endif
33865 1:
33866
33867 /*
33868 * 9. Balance the stack. And because EAX contain the return value,
33869 * we'd better not clobber it.
33870 */
33871 - leal efi_rt_function_ptr, %edx
33872 - movl (%edx), %ecx
33873 - pushl %ecx
33874 + pushl (efi_rt_function_ptr)
33875
33876 /*
33877 - * 10. Push the saved return address onto the stack and return.
33878 + * 10. Return to the saved return address.
33879 */
33880 - leal saved_return_addr, %edx
33881 - movl (%edx), %ecx
33882 - pushl %ecx
33883 - ret
33884 + jmpl *(saved_return_addr)
33885 ENDPROC(efi_call_phys)
33886 .previous
33887
33888 -.data
33889 +__INITDATA
33890 saved_return_addr:
33891 .long 0
33892 efi_rt_function_ptr:
33893 diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
33894 index 4c07cca..2c8427d 100644
33895 --- a/arch/x86/platform/efi/efi_stub_64.S
33896 +++ b/arch/x86/platform/efi/efi_stub_64.S
33897 @@ -7,6 +7,7 @@
33898 */
33899
33900 #include <linux/linkage.h>
33901 +#include <asm/alternative-asm.h>
33902
33903 #define SAVE_XMM \
33904 mov %rsp, %rax; \
33905 @@ -40,6 +41,7 @@ ENTRY(efi_call0)
33906 call *%rdi
33907 addq $32, %rsp
33908 RESTORE_XMM
33909 + pax_force_retaddr 0, 1
33910 ret
33911 ENDPROC(efi_call0)
33912
33913 @@ -50,6 +52,7 @@ ENTRY(efi_call1)
33914 call *%rdi
33915 addq $32, %rsp
33916 RESTORE_XMM
33917 + pax_force_retaddr 0, 1
33918 ret
33919 ENDPROC(efi_call1)
33920
33921 @@ -60,6 +63,7 @@ ENTRY(efi_call2)
33922 call *%rdi
33923 addq $32, %rsp
33924 RESTORE_XMM
33925 + pax_force_retaddr 0, 1
33926 ret
33927 ENDPROC(efi_call2)
33928
33929 @@ -71,6 +75,7 @@ ENTRY(efi_call3)
33930 call *%rdi
33931 addq $32, %rsp
33932 RESTORE_XMM
33933 + pax_force_retaddr 0, 1
33934 ret
33935 ENDPROC(efi_call3)
33936
33937 @@ -83,6 +88,7 @@ ENTRY(efi_call4)
33938 call *%rdi
33939 addq $32, %rsp
33940 RESTORE_XMM
33941 + pax_force_retaddr 0, 1
33942 ret
33943 ENDPROC(efi_call4)
33944
33945 @@ -96,6 +102,7 @@ ENTRY(efi_call5)
33946 call *%rdi
33947 addq $48, %rsp
33948 RESTORE_XMM
33949 + pax_force_retaddr 0, 1
33950 ret
33951 ENDPROC(efi_call5)
33952
33953 @@ -112,5 +119,6 @@ ENTRY(efi_call6)
33954 call *%rdi
33955 addq $48, %rsp
33956 RESTORE_XMM
33957 + pax_force_retaddr 0, 1
33958 ret
33959 ENDPROC(efi_call6)
33960 diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
33961 index 3ca5957..7909c18 100644
33962 --- a/arch/x86/platform/mrst/mrst.c
33963 +++ b/arch/x86/platform/mrst/mrst.c
33964 @@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
33965 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
33966 int sfi_mrtc_num;
33967
33968 -static void mrst_power_off(void)
33969 +static __noreturn void mrst_power_off(void)
33970 {
33971 + BUG();
33972 }
33973
33974 -static void mrst_reboot(void)
33975 +static __noreturn void mrst_reboot(void)
33976 {
33977 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
33978 + BUG();
33979 }
33980
33981 /* parse all the mtimer info to a static mtimer array */
33982 diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
33983 index d6ee929..3637cb5 100644
33984 --- a/arch/x86/platform/olpc/olpc_dt.c
33985 +++ b/arch/x86/platform/olpc/olpc_dt.c
33986 @@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
33987 return res;
33988 }
33989
33990 -static struct of_pdt_ops prom_olpc_ops __initdata = {
33991 +static struct of_pdt_ops prom_olpc_ops __initconst = {
33992 .nextprop = olpc_dt_nextprop,
33993 .getproplen = olpc_dt_getproplen,
33994 .getproperty = olpc_dt_getproperty,
33995 diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
33996 index 424f4c9..f2a2988 100644
33997 --- a/arch/x86/power/cpu.c
33998 +++ b/arch/x86/power/cpu.c
33999 @@ -137,11 +137,8 @@ static void do_fpu_end(void)
34000 static void fix_processor_context(void)
34001 {
34002 int cpu = smp_processor_id();
34003 - struct tss_struct *t = &per_cpu(init_tss, cpu);
34004 -#ifdef CONFIG_X86_64
34005 - struct desc_struct *desc = get_cpu_gdt_table(cpu);
34006 - tss_desc tss;
34007 -#endif
34008 + struct tss_struct *t = init_tss + cpu;
34009 +
34010 set_tss_desc(cpu, t); /*
34011 * This just modifies memory; should not be
34012 * necessary. But... This is necessary, because
34013 @@ -150,10 +147,6 @@ static void fix_processor_context(void)
34014 */
34015
34016 #ifdef CONFIG_X86_64
34017 - memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
34018 - tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
34019 - write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
34020 -
34021 syscall_init(); /* This sets MSR_*STAR and related */
34022 #endif
34023 load_TR_desc(); /* This does ltr */
34024 diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
34025 index a44f457..9140171 100644
34026 --- a/arch/x86/realmode/init.c
34027 +++ b/arch/x86/realmode/init.c
34028 @@ -70,7 +70,13 @@ void __init setup_real_mode(void)
34029 __va(real_mode_header->trampoline_header);
34030
34031 #ifdef CONFIG_X86_32
34032 - trampoline_header->start = __pa_symbol(startup_32_smp);
34033 + trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
34034 +
34035 +#ifdef CONFIG_PAX_KERNEXEC
34036 + trampoline_header->start -= LOAD_PHYSICAL_ADDR;
34037 +#endif
34038 +
34039 + trampoline_header->boot_cs = __BOOT_CS;
34040 trampoline_header->gdt_limit = __BOOT_DS + 7;
34041 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
34042 #else
34043 @@ -86,7 +92,7 @@ void __init setup_real_mode(void)
34044 *trampoline_cr4_features = read_cr4();
34045
34046 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
34047 - trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
34048 + trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
34049 trampoline_pgd[511] = init_level4_pgt[511].pgd;
34050 #endif
34051 }
34052 diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
34053 index 9cac825..4890b25 100644
34054 --- a/arch/x86/realmode/rm/Makefile
34055 +++ b/arch/x86/realmode/rm/Makefile
34056 @@ -79,5 +79,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
34057 $(call cc-option, -fno-unit-at-a-time)) \
34058 $(call cc-option, -fno-stack-protector) \
34059 $(call cc-option, -mpreferred-stack-boundary=2)
34060 +ifdef CONSTIFY_PLUGIN
34061 +KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
34062 +endif
34063 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
34064 GCOV_PROFILE := n
34065 diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
34066 index a28221d..93c40f1 100644
34067 --- a/arch/x86/realmode/rm/header.S
34068 +++ b/arch/x86/realmode/rm/header.S
34069 @@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
34070 #endif
34071 /* APM/BIOS reboot */
34072 .long pa_machine_real_restart_asm
34073 -#ifdef CONFIG_X86_64
34074 +#ifdef CONFIG_X86_32
34075 + .long __KERNEL_CS
34076 +#else
34077 .long __KERNEL32_CS
34078 #endif
34079 END(real_mode_header)
34080 diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
34081 index c1b2791..f9e31c7 100644
34082 --- a/arch/x86/realmode/rm/trampoline_32.S
34083 +++ b/arch/x86/realmode/rm/trampoline_32.S
34084 @@ -25,6 +25,12 @@
34085 #include <asm/page_types.h>
34086 #include "realmode.h"
34087
34088 +#ifdef CONFIG_PAX_KERNEXEC
34089 +#define ta(X) (X)
34090 +#else
34091 +#define ta(X) (pa_ ## X)
34092 +#endif
34093 +
34094 .text
34095 .code16
34096
34097 @@ -39,8 +45,6 @@ ENTRY(trampoline_start)
34098
34099 cli # We should be safe anyway
34100
34101 - movl tr_start, %eax # where we need to go
34102 -
34103 movl $0xA5A5A5A5, trampoline_status
34104 # write marker for master knows we're running
34105
34106 @@ -56,7 +60,7 @@ ENTRY(trampoline_start)
34107 movw $1, %dx # protected mode (PE) bit
34108 lmsw %dx # into protected mode
34109
34110 - ljmpl $__BOOT_CS, $pa_startup_32
34111 + ljmpl *(trampoline_header)
34112
34113 .section ".text32","ax"
34114 .code32
34115 @@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
34116 .balign 8
34117 GLOBAL(trampoline_header)
34118 tr_start: .space 4
34119 - tr_gdt_pad: .space 2
34120 + tr_boot_cs: .space 2
34121 tr_gdt: .space 6
34122 END(trampoline_header)
34123
34124 diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
34125 index bb360dc..d0fd8f8 100644
34126 --- a/arch/x86/realmode/rm/trampoline_64.S
34127 +++ b/arch/x86/realmode/rm/trampoline_64.S
34128 @@ -94,6 +94,7 @@ ENTRY(startup_32)
34129 movl %edx, %gs
34130
34131 movl pa_tr_cr4, %eax
34132 + andl $~X86_CR4_PCIDE, %eax
34133 movl %eax, %cr4 # Enable PAE mode
34134
34135 # Setup trampoline 4 level pagetables
34136 @@ -107,7 +108,7 @@ ENTRY(startup_32)
34137 wrmsr
34138
34139 # Enable paging and in turn activate Long Mode
34140 - movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
34141 + movl $(X86_CR0_PG | X86_CR0_PE), %eax
34142 movl %eax, %cr0
34143
34144 /*
34145 diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
34146 index e812034..c747134 100644
34147 --- a/arch/x86/tools/Makefile
34148 +++ b/arch/x86/tools/Makefile
34149 @@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
34150
34151 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
34152
34153 -HOST_EXTRACFLAGS += -I$(srctree)/tools/include
34154 +HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
34155 hostprogs-y += relocs
34156 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
34157 relocs: $(obj)/relocs
34158 diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
34159 index f7bab68..b6d9886 100644
34160 --- a/arch/x86/tools/relocs.c
34161 +++ b/arch/x86/tools/relocs.c
34162 @@ -1,5 +1,7 @@
34163 /* This is included from relocs_32/64.c */
34164
34165 +#include "../../../include/generated/autoconf.h"
34166 +
34167 #define ElfW(type) _ElfW(ELF_BITS, type)
34168 #define _ElfW(bits, type) __ElfW(bits, type)
34169 #define __ElfW(bits, type) Elf##bits##_##type
34170 @@ -11,6 +13,7 @@
34171 #define Elf_Sym ElfW(Sym)
34172
34173 static Elf_Ehdr ehdr;
34174 +static Elf_Phdr *phdr;
34175
34176 struct relocs {
34177 uint32_t *offset;
34178 @@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
34179 }
34180 }
34181
34182 +static void read_phdrs(FILE *fp)
34183 +{
34184 + unsigned int i;
34185 +
34186 + phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
34187 + if (!phdr) {
34188 + die("Unable to allocate %d program headers\n",
34189 + ehdr.e_phnum);
34190 + }
34191 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
34192 + die("Seek to %d failed: %s\n",
34193 + ehdr.e_phoff, strerror(errno));
34194 + }
34195 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
34196 + die("Cannot read ELF program headers: %s\n",
34197 + strerror(errno));
34198 + }
34199 + for(i = 0; i < ehdr.e_phnum; i++) {
34200 + phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
34201 + phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
34202 + phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
34203 + phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
34204 + phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
34205 + phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
34206 + phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
34207 + phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
34208 + }
34209 +
34210 +}
34211 +
34212 static void read_shdrs(FILE *fp)
34213 {
34214 - int i;
34215 + unsigned int i;
34216 Elf_Shdr shdr;
34217
34218 secs = calloc(ehdr.e_shnum, sizeof(struct section));
34219 @@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
34220
34221 static void read_strtabs(FILE *fp)
34222 {
34223 - int i;
34224 + unsigned int i;
34225 for (i = 0; i < ehdr.e_shnum; i++) {
34226 struct section *sec = &secs[i];
34227 if (sec->shdr.sh_type != SHT_STRTAB) {
34228 @@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
34229
34230 static void read_symtabs(FILE *fp)
34231 {
34232 - int i,j;
34233 + unsigned int i,j;
34234 for (i = 0; i < ehdr.e_shnum; i++) {
34235 struct section *sec = &secs[i];
34236 if (sec->shdr.sh_type != SHT_SYMTAB) {
34237 @@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
34238 }
34239
34240
34241 -static void read_relocs(FILE *fp)
34242 +static void read_relocs(FILE *fp, int use_real_mode)
34243 {
34244 - int i,j;
34245 + unsigned int i,j;
34246 + uint32_t base;
34247 +
34248 for (i = 0; i < ehdr.e_shnum; i++) {
34249 struct section *sec = &secs[i];
34250 if (sec->shdr.sh_type != SHT_REL_TYPE) {
34251 @@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
34252 die("Cannot read symbol table: %s\n",
34253 strerror(errno));
34254 }
34255 + base = 0;
34256 +
34257 +#ifdef CONFIG_X86_32
34258 + for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
34259 + if (phdr[j].p_type != PT_LOAD )
34260 + continue;
34261 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
34262 + continue;
34263 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
34264 + break;
34265 + }
34266 +#endif
34267 +
34268 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
34269 Elf_Rel *rel = &sec->reltab[j];
34270 - rel->r_offset = elf_addr_to_cpu(rel->r_offset);
34271 + rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
34272 rel->r_info = elf_xword_to_cpu(rel->r_info);
34273 #if (SHT_REL_TYPE == SHT_RELA)
34274 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
34275 @@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
34276
34277 static void print_absolute_symbols(void)
34278 {
34279 - int i;
34280 + unsigned int i;
34281 const char *format;
34282
34283 if (ELF_BITS == 64)
34284 @@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
34285 for (i = 0; i < ehdr.e_shnum; i++) {
34286 struct section *sec = &secs[i];
34287 char *sym_strtab;
34288 - int j;
34289 + unsigned int j;
34290
34291 if (sec->shdr.sh_type != SHT_SYMTAB) {
34292 continue;
34293 @@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
34294
34295 static void print_absolute_relocs(void)
34296 {
34297 - int i, printed = 0;
34298 + unsigned int i, printed = 0;
34299 const char *format;
34300
34301 if (ELF_BITS == 64)
34302 @@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
34303 struct section *sec_applies, *sec_symtab;
34304 char *sym_strtab;
34305 Elf_Sym *sh_symtab;
34306 - int j;
34307 + unsigned int j;
34308 if (sec->shdr.sh_type != SHT_REL_TYPE) {
34309 continue;
34310 }
34311 @@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
34312 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
34313 Elf_Sym *sym, const char *symname))
34314 {
34315 - int i;
34316 + unsigned int i;
34317 /* Walk through the relocations */
34318 for (i = 0; i < ehdr.e_shnum; i++) {
34319 char *sym_strtab;
34320 Elf_Sym *sh_symtab;
34321 struct section *sec_applies, *sec_symtab;
34322 - int j;
34323 + unsigned int j;
34324 struct section *sec = &secs[i];
34325
34326 if (sec->shdr.sh_type != SHT_REL_TYPE) {
34327 @@ -812,6 +860,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
34328 {
34329 unsigned r_type = ELF32_R_TYPE(rel->r_info);
34330 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
34331 + char *sym_strtab = sec->link->link->strtab;
34332 +
34333 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
34334 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
34335 + return 0;
34336 +
34337 +#ifdef CONFIG_PAX_KERNEXEC
34338 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
34339 + if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
34340 + return 0;
34341 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
34342 + return 0;
34343 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
34344 + return 0;
34345 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
34346 + return 0;
34347 +#endif
34348
34349 switch (r_type) {
34350 case R_386_NONE:
34351 @@ -950,7 +1015,7 @@ static int write32_as_text(uint32_t v, FILE *f)
34352
34353 static void emit_relocs(int as_text, int use_real_mode)
34354 {
34355 - int i;
34356 + unsigned int i;
34357 int (*write_reloc)(uint32_t, FILE *) = write32;
34358 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
34359 const char *symname);
34360 @@ -1026,10 +1091,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
34361 {
34362 regex_init(use_real_mode);
34363 read_ehdr(fp);
34364 + read_phdrs(fp);
34365 read_shdrs(fp);
34366 read_strtabs(fp);
34367 read_symtabs(fp);
34368 - read_relocs(fp);
34369 + read_relocs(fp, use_real_mode);
34370 if (ELF_BITS == 64)
34371 percpu_init();
34372 if (show_absolute_syms) {
34373 diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
34374 index 80ffa5b..a33bd15 100644
34375 --- a/arch/x86/um/tls_32.c
34376 +++ b/arch/x86/um/tls_32.c
34377 @@ -260,7 +260,7 @@ out:
34378 if (unlikely(task == current &&
34379 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
34380 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
34381 - "without flushed TLS.", current->pid);
34382 + "without flushed TLS.", task_pid_nr(current));
34383 }
34384
34385 return 0;
34386 diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
34387 index fd14be1..e3c79c0 100644
34388 --- a/arch/x86/vdso/Makefile
34389 +++ b/arch/x86/vdso/Makefile
34390 @@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
34391 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
34392 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
34393
34394 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
34395 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
34396 GCOV_PROFILE := n
34397
34398 #
34399 diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
34400 index d6bfb87..876ee18 100644
34401 --- a/arch/x86/vdso/vdso32-setup.c
34402 +++ b/arch/x86/vdso/vdso32-setup.c
34403 @@ -25,6 +25,7 @@
34404 #include <asm/tlbflush.h>
34405 #include <asm/vdso.h>
34406 #include <asm/proto.h>
34407 +#include <asm/mman.h>
34408
34409 enum {
34410 VDSO_DISABLED = 0,
34411 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
34412 void enable_sep_cpu(void)
34413 {
34414 int cpu = get_cpu();
34415 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
34416 + struct tss_struct *tss = init_tss + cpu;
34417
34418 if (!boot_cpu_has(X86_FEATURE_SEP)) {
34419 put_cpu();
34420 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
34421 gate_vma.vm_start = FIXADDR_USER_START;
34422 gate_vma.vm_end = FIXADDR_USER_END;
34423 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
34424 - gate_vma.vm_page_prot = __P101;
34425 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
34426
34427 return 0;
34428 }
34429 @@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
34430 if (compat)
34431 addr = VDSO_HIGH_BASE;
34432 else {
34433 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
34434 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
34435 if (IS_ERR_VALUE(addr)) {
34436 ret = addr;
34437 goto up_fail;
34438 }
34439 }
34440
34441 - current->mm->context.vdso = (void *)addr;
34442 + current->mm->context.vdso = addr;
34443
34444 if (compat_uses_vma || !compat) {
34445 /*
34446 @@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
34447 }
34448
34449 current_thread_info()->sysenter_return =
34450 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
34451 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
34452
34453 up_fail:
34454 if (ret)
34455 - current->mm->context.vdso = NULL;
34456 + current->mm->context.vdso = 0;
34457
34458 up_write(&mm->mmap_sem);
34459
34460 @@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
34461
34462 const char *arch_vma_name(struct vm_area_struct *vma)
34463 {
34464 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
34465 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
34466 return "[vdso]";
34467 +
34468 +#ifdef CONFIG_PAX_SEGMEXEC
34469 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
34470 + return "[vdso]";
34471 +#endif
34472 +
34473 return NULL;
34474 }
34475
34476 @@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
34477 * Check to see if the corresponding task was created in compat vdso
34478 * mode.
34479 */
34480 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
34481 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
34482 return &gate_vma;
34483 return NULL;
34484 }
34485 diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
34486 index 431e875..cbb23f3 100644
34487 --- a/arch/x86/vdso/vma.c
34488 +++ b/arch/x86/vdso/vma.c
34489 @@ -16,8 +16,6 @@
34490 #include <asm/vdso.h>
34491 #include <asm/page.h>
34492
34493 -unsigned int __read_mostly vdso_enabled = 1;
34494 -
34495 extern char vdso_start[], vdso_end[];
34496 extern unsigned short vdso_sync_cpuid;
34497
34498 @@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
34499 * unaligned here as a result of stack start randomization.
34500 */
34501 addr = PAGE_ALIGN(addr);
34502 - addr = align_vdso_addr(addr);
34503
34504 return addr;
34505 }
34506 @@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
34507 unsigned size)
34508 {
34509 struct mm_struct *mm = current->mm;
34510 - unsigned long addr;
34511 + unsigned long addr = 0;
34512 int ret;
34513
34514 - if (!vdso_enabled)
34515 - return 0;
34516 -
34517 down_write(&mm->mmap_sem);
34518 +
34519 +#ifdef CONFIG_PAX_RANDMMAP
34520 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
34521 +#endif
34522 +
34523 addr = vdso_addr(mm->start_stack, size);
34524 + addr = align_vdso_addr(addr);
34525 addr = get_unmapped_area(NULL, addr, size, 0, 0);
34526 if (IS_ERR_VALUE(addr)) {
34527 ret = addr;
34528 goto up_fail;
34529 }
34530
34531 - current->mm->context.vdso = (void *)addr;
34532 + mm->context.vdso = addr;
34533
34534 ret = install_special_mapping(mm, addr, size,
34535 VM_READ|VM_EXEC|
34536 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
34537 pages);
34538 - if (ret) {
34539 - current->mm->context.vdso = NULL;
34540 - goto up_fail;
34541 - }
34542 + if (ret)
34543 + mm->context.vdso = 0;
34544
34545 up_fail:
34546 up_write(&mm->mmap_sem);
34547 @@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
34548 vdsox32_size);
34549 }
34550 #endif
34551 -
34552 -static __init int vdso_setup(char *s)
34553 -{
34554 - vdso_enabled = simple_strtoul(s, NULL, 0);
34555 - return 0;
34556 -}
34557 -__setup("vdso=", vdso_setup);
34558 diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
34559 index fa6ade7..73da73a5 100644
34560 --- a/arch/x86/xen/enlighten.c
34561 +++ b/arch/x86/xen/enlighten.c
34562 @@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
34563
34564 struct shared_info xen_dummy_shared_info;
34565
34566 -void *xen_initial_gdt;
34567 -
34568 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
34569 __read_mostly int xen_have_vector_callback;
34570 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
34571 @@ -541,8 +539,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
34572 {
34573 unsigned long va = dtr->address;
34574 unsigned int size = dtr->size + 1;
34575 - unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
34576 - unsigned long frames[pages];
34577 + unsigned long frames[65536 / PAGE_SIZE];
34578 int f;
34579
34580 /*
34581 @@ -590,8 +587,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
34582 {
34583 unsigned long va = dtr->address;
34584 unsigned int size = dtr->size + 1;
34585 - unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
34586 - unsigned long frames[pages];
34587 + unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
34588 int f;
34589
34590 /*
34591 @@ -599,7 +595,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
34592 * 8-byte entries, or 16 4k pages..
34593 */
34594
34595 - BUG_ON(size > 65536);
34596 + BUG_ON(size > GDT_SIZE);
34597 BUG_ON(va & ~PAGE_MASK);
34598
34599 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
34600 @@ -988,7 +984,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
34601 return 0;
34602 }
34603
34604 -static void set_xen_basic_apic_ops(void)
34605 +static void __init set_xen_basic_apic_ops(void)
34606 {
34607 apic->read = xen_apic_read;
34608 apic->write = xen_apic_write;
34609 @@ -1293,30 +1289,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
34610 #endif
34611 };
34612
34613 -static void xen_reboot(int reason)
34614 +static __noreturn void xen_reboot(int reason)
34615 {
34616 struct sched_shutdown r = { .reason = reason };
34617
34618 - if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
34619 - BUG();
34620 + HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
34621 + BUG();
34622 }
34623
34624 -static void xen_restart(char *msg)
34625 +static __noreturn void xen_restart(char *msg)
34626 {
34627 xen_reboot(SHUTDOWN_reboot);
34628 }
34629
34630 -static void xen_emergency_restart(void)
34631 +static __noreturn void xen_emergency_restart(void)
34632 {
34633 xen_reboot(SHUTDOWN_reboot);
34634 }
34635
34636 -static void xen_machine_halt(void)
34637 +static __noreturn void xen_machine_halt(void)
34638 {
34639 xen_reboot(SHUTDOWN_poweroff);
34640 }
34641
34642 -static void xen_machine_power_off(void)
34643 +static __noreturn void xen_machine_power_off(void)
34644 {
34645 if (pm_power_off)
34646 pm_power_off();
34647 @@ -1467,7 +1463,17 @@ asmlinkage void __init xen_start_kernel(void)
34648 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
34649
34650 /* Work out if we support NX */
34651 - x86_configure_nx();
34652 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
34653 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
34654 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
34655 + unsigned l, h;
34656 +
34657 + __supported_pte_mask |= _PAGE_NX;
34658 + rdmsr(MSR_EFER, l, h);
34659 + l |= EFER_NX;
34660 + wrmsr(MSR_EFER, l, h);
34661 + }
34662 +#endif
34663
34664 xen_setup_features();
34665
34666 @@ -1498,13 +1504,6 @@ asmlinkage void __init xen_start_kernel(void)
34667
34668 machine_ops = xen_machine_ops;
34669
34670 - /*
34671 - * The only reliable way to retain the initial address of the
34672 - * percpu gdt_page is to remember it here, so we can go and
34673 - * mark it RW later, when the initial percpu area is freed.
34674 - */
34675 - xen_initial_gdt = &per_cpu(gdt_page, 0);
34676 -
34677 xen_smp_init();
34678
34679 #ifdef CONFIG_ACPI_NUMA
34680 diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
34681 index fdc3ba2..23cae00 100644
34682 --- a/arch/x86/xen/mmu.c
34683 +++ b/arch/x86/xen/mmu.c
34684 @@ -379,7 +379,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
34685 return val;
34686 }
34687
34688 -static pteval_t pte_pfn_to_mfn(pteval_t val)
34689 +static pteval_t __intentional_overflow(-1) pte_pfn_to_mfn(pteval_t val)
34690 {
34691 if (val & _PAGE_PRESENT) {
34692 unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
34693 @@ -1894,6 +1894,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
34694 /* L3_k[510] -> level2_kernel_pgt
34695 * L3_i[511] -> level2_fixmap_pgt */
34696 convert_pfn_mfn(level3_kernel_pgt);
34697 + convert_pfn_mfn(level3_vmalloc_start_pgt);
34698 + convert_pfn_mfn(level3_vmalloc_end_pgt);
34699 + convert_pfn_mfn(level3_vmemmap_pgt);
34700
34701 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
34702 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
34703 @@ -1923,8 +1926,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
34704 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
34705 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
34706 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
34707 + set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
34708 + set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
34709 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
34710 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
34711 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
34712 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
34713 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
34714 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
34715
34716 @@ -2108,6 +2115,7 @@ static void __init xen_post_allocator_init(void)
34717 pv_mmu_ops.set_pud = xen_set_pud;
34718 #if PAGETABLE_LEVELS == 4
34719 pv_mmu_ops.set_pgd = xen_set_pgd;
34720 + pv_mmu_ops.set_pgd_batched = xen_set_pgd;
34721 #endif
34722
34723 /* This will work as long as patching hasn't happened yet
34724 @@ -2186,6 +2194,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
34725 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
34726 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
34727 .set_pgd = xen_set_pgd_hyper,
34728 + .set_pgd_batched = xen_set_pgd_hyper,
34729
34730 .alloc_pud = xen_alloc_pmd_init,
34731 .release_pud = xen_release_pmd_init,
34732 diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
34733 index 31d0475..51af671 100644
34734 --- a/arch/x86/xen/smp.c
34735 +++ b/arch/x86/xen/smp.c
34736 @@ -274,17 +274,13 @@ static void __init xen_smp_prepare_boot_cpu(void)
34737 native_smp_prepare_boot_cpu();
34738
34739 if (xen_pv_domain()) {
34740 - /* We've switched to the "real" per-cpu gdt, so make sure the
34741 - old memory can be recycled */
34742 - make_lowmem_page_readwrite(xen_initial_gdt);
34743 -
34744 #ifdef CONFIG_X86_32
34745 /*
34746 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
34747 * expects __USER_DS
34748 */
34749 - loadsegment(ds, __USER_DS);
34750 - loadsegment(es, __USER_DS);
34751 + loadsegment(ds, __KERNEL_DS);
34752 + loadsegment(es, __KERNEL_DS);
34753 #endif
34754
34755 xen_filter_cpu_maps();
34756 @@ -364,7 +360,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
34757 ctxt->user_regs.ss = __KERNEL_DS;
34758 #ifdef CONFIG_X86_32
34759 ctxt->user_regs.fs = __KERNEL_PERCPU;
34760 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
34761 + savesegment(gs, ctxt->user_regs.gs);
34762 #else
34763 ctxt->gs_base_kernel = per_cpu_offset(cpu);
34764 #endif
34765 @@ -374,8 +370,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
34766
34767 {
34768 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
34769 - ctxt->user_regs.ds = __USER_DS;
34770 - ctxt->user_regs.es = __USER_DS;
34771 + ctxt->user_regs.ds = __KERNEL_DS;
34772 + ctxt->user_regs.es = __KERNEL_DS;
34773
34774 xen_copy_trap_info(ctxt->trap_ctxt);
34775
34776 @@ -420,13 +416,12 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
34777 int rc;
34778
34779 per_cpu(current_task, cpu) = idle;
34780 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
34781 #ifdef CONFIG_X86_32
34782 irq_ctx_init(cpu);
34783 #else
34784 clear_tsk_thread_flag(idle, TIF_FORK);
34785 - per_cpu(kernel_stack, cpu) =
34786 - (unsigned long)task_stack_page(idle) -
34787 - KERNEL_STACK_OFFSET + THREAD_SIZE;
34788 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
34789 #endif
34790 xen_setup_runstate_info(cpu);
34791 xen_setup_timer(cpu);
34792 @@ -702,7 +697,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
34793
34794 void __init xen_smp_init(void)
34795 {
34796 - smp_ops = xen_smp_ops;
34797 + memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
34798 xen_fill_possible_map();
34799 }
34800
34801 diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
34802 index 33ca6e4..0ded929 100644
34803 --- a/arch/x86/xen/xen-asm_32.S
34804 +++ b/arch/x86/xen/xen-asm_32.S
34805 @@ -84,14 +84,14 @@ ENTRY(xen_iret)
34806 ESP_OFFSET=4 # bytes pushed onto stack
34807
34808 /*
34809 - * Store vcpu_info pointer for easy access. Do it this way to
34810 - * avoid having to reload %fs
34811 + * Store vcpu_info pointer for easy access.
34812 */
34813 #ifdef CONFIG_SMP
34814 - GET_THREAD_INFO(%eax)
34815 - movl %ss:TI_cpu(%eax), %eax
34816 - movl %ss:__per_cpu_offset(,%eax,4), %eax
34817 - mov %ss:xen_vcpu(%eax), %eax
34818 + push %fs
34819 + mov $(__KERNEL_PERCPU), %eax
34820 + mov %eax, %fs
34821 + mov PER_CPU_VAR(xen_vcpu), %eax
34822 + pop %fs
34823 #else
34824 movl %ss:xen_vcpu, %eax
34825 #endif
34826 diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
34827 index 7faed58..ba4427c 100644
34828 --- a/arch/x86/xen/xen-head.S
34829 +++ b/arch/x86/xen/xen-head.S
34830 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
34831 #ifdef CONFIG_X86_32
34832 mov %esi,xen_start_info
34833 mov $init_thread_union+THREAD_SIZE,%esp
34834 +#ifdef CONFIG_SMP
34835 + movl $cpu_gdt_table,%edi
34836 + movl $__per_cpu_load,%eax
34837 + movw %ax,__KERNEL_PERCPU + 2(%edi)
34838 + rorl $16,%eax
34839 + movb %al,__KERNEL_PERCPU + 4(%edi)
34840 + movb %ah,__KERNEL_PERCPU + 7(%edi)
34841 + movl $__per_cpu_end - 1,%eax
34842 + subl $__per_cpu_start,%eax
34843 + movw %ax,__KERNEL_PERCPU + 0(%edi)
34844 +#endif
34845 #else
34846 mov %rsi,xen_start_info
34847 mov $init_thread_union+THREAD_SIZE,%rsp
34848 diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
34849 index 95f8c61..611d6e8 100644
34850 --- a/arch/x86/xen/xen-ops.h
34851 +++ b/arch/x86/xen/xen-ops.h
34852 @@ -10,8 +10,6 @@
34853 extern const char xen_hypervisor_callback[];
34854 extern const char xen_failsafe_callback[];
34855
34856 -extern void *xen_initial_gdt;
34857 -
34858 struct trap_info;
34859 void xen_copy_trap_info(struct trap_info *traps);
34860
34861 diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
34862 index 525bd3d..ef888b1 100644
34863 --- a/arch/xtensa/variants/dc232b/include/variant/core.h
34864 +++ b/arch/xtensa/variants/dc232b/include/variant/core.h
34865 @@ -119,9 +119,9 @@
34866 ----------------------------------------------------------------------*/
34867
34868 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
34869 -#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
34870 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
34871 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
34872 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
34873
34874 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
34875 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
34876 diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
34877 index 2f33760..835e50a 100644
34878 --- a/arch/xtensa/variants/fsf/include/variant/core.h
34879 +++ b/arch/xtensa/variants/fsf/include/variant/core.h
34880 @@ -11,6 +11,7 @@
34881 #ifndef _XTENSA_CORE_H
34882 #define _XTENSA_CORE_H
34883
34884 +#include <linux/const.h>
34885
34886 /****************************************************************************
34887 Parameters Useful for Any Code, USER or PRIVILEGED
34888 @@ -112,9 +113,9 @@
34889 ----------------------------------------------------------------------*/
34890
34891 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
34892 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
34893 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
34894 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
34895 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
34896
34897 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
34898 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
34899 diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
34900 index af00795..2bb8105 100644
34901 --- a/arch/xtensa/variants/s6000/include/variant/core.h
34902 +++ b/arch/xtensa/variants/s6000/include/variant/core.h
34903 @@ -11,6 +11,7 @@
34904 #ifndef _XTENSA_CORE_CONFIGURATION_H
34905 #define _XTENSA_CORE_CONFIGURATION_H
34906
34907 +#include <linux/const.h>
34908
34909 /****************************************************************************
34910 Parameters Useful for Any Code, USER or PRIVILEGED
34911 @@ -118,9 +119,9 @@
34912 ----------------------------------------------------------------------*/
34913
34914 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
34915 -#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
34916 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
34917 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
34918 +#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
34919
34920 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
34921 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
34922 diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
34923 index 4e491d9..c8e18e4 100644
34924 --- a/block/blk-cgroup.c
34925 +++ b/block/blk-cgroup.c
34926 @@ -812,7 +812,7 @@ static void blkcg_css_free(struct cgroup_subsys_state *css)
34927 static struct cgroup_subsys_state *
34928 blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
34929 {
34930 - static atomic64_t id_seq = ATOMIC64_INIT(0);
34931 + static atomic64_unchecked_t id_seq = ATOMIC64_INIT(0);
34932 struct blkcg *blkcg;
34933
34934 if (!parent_css) {
34935 @@ -826,7 +826,7 @@ blkcg_css_alloc(struct cgroup_subsys_state *parent_css)
34936
34937 blkcg->cfq_weight = CFQ_WEIGHT_DEFAULT;
34938 blkcg->cfq_leaf_weight = CFQ_WEIGHT_DEFAULT;
34939 - blkcg->id = atomic64_inc_return(&id_seq); /* root is 0, start from 1 */
34940 + blkcg->id = atomic64_inc_return_unchecked(&id_seq); /* root is 0, start from 1 */
34941 done:
34942 spin_lock_init(&blkcg->lock);
34943 INIT_RADIX_TREE(&blkcg->blkg_tree, GFP_ATOMIC);
34944 diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
34945 index 4b8d9b54..a7178c0 100644
34946 --- a/block/blk-iopoll.c
34947 +++ b/block/blk-iopoll.c
34948 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
34949 }
34950 EXPORT_SYMBOL(blk_iopoll_complete);
34951
34952 -static void blk_iopoll_softirq(struct softirq_action *h)
34953 +static __latent_entropy void blk_iopoll_softirq(void)
34954 {
34955 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
34956 int rearm = 0, budget = blk_iopoll_budget;
34957 diff --git a/block/blk-map.c b/block/blk-map.c
34958 index 623e1cd..ca1e109 100644
34959 --- a/block/blk-map.c
34960 +++ b/block/blk-map.c
34961 @@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
34962 if (!len || !kbuf)
34963 return -EINVAL;
34964
34965 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
34966 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
34967 if (do_copy)
34968 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
34969 else
34970 diff --git a/block/blk-softirq.c b/block/blk-softirq.c
34971 index ec9e606..3f38839 100644
34972 --- a/block/blk-softirq.c
34973 +++ b/block/blk-softirq.c
34974 @@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
34975 * Softirq action handler - move entries to local list and loop over them
34976 * while passing them to the queue registered handler.
34977 */
34978 -static void blk_done_softirq(struct softirq_action *h)
34979 +static __latent_entropy void blk_done_softirq(void)
34980 {
34981 struct list_head *cpu_list, local_list;
34982
34983 diff --git a/block/bsg.c b/block/bsg.c
34984 index 420a5a9..23834aa 100644
34985 --- a/block/bsg.c
34986 +++ b/block/bsg.c
34987 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
34988 struct sg_io_v4 *hdr, struct bsg_device *bd,
34989 fmode_t has_write_perm)
34990 {
34991 + unsigned char tmpcmd[sizeof(rq->__cmd)];
34992 + unsigned char *cmdptr;
34993 +
34994 if (hdr->request_len > BLK_MAX_CDB) {
34995 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
34996 if (!rq->cmd)
34997 return -ENOMEM;
34998 - }
34999 + cmdptr = rq->cmd;
35000 + } else
35001 + cmdptr = tmpcmd;
35002
35003 - if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
35004 + if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
35005 hdr->request_len))
35006 return -EFAULT;
35007
35008 + if (cmdptr != rq->cmd)
35009 + memcpy(rq->cmd, cmdptr, hdr->request_len);
35010 +
35011 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
35012 if (blk_verify_command(rq->cmd, has_write_perm))
35013 return -EPERM;
35014 diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
35015 index fbd5a67..5d631b5 100644
35016 --- a/block/compat_ioctl.c
35017 +++ b/block/compat_ioctl.c
35018 @@ -341,7 +341,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
35019 err |= __get_user(f->spec1, &uf->spec1);
35020 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
35021 err |= __get_user(name, &uf->name);
35022 - f->name = compat_ptr(name);
35023 + f->name = (void __force_kernel *)compat_ptr(name);
35024 if (err) {
35025 err = -EFAULT;
35026 goto out;
35027 diff --git a/block/genhd.c b/block/genhd.c
35028 index 791f419..89f21c4 100644
35029 --- a/block/genhd.c
35030 +++ b/block/genhd.c
35031 @@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
35032
35033 /*
35034 * Register device numbers dev..(dev+range-1)
35035 - * range must be nonzero
35036 + * Noop if @range is zero.
35037 * The hash chain is sorted on range, so that subranges can override.
35038 */
35039 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
35040 struct kobject *(*probe)(dev_t, int *, void *),
35041 int (*lock)(dev_t, void *), void *data)
35042 {
35043 - kobj_map(bdev_map, devt, range, module, probe, lock, data);
35044 + if (range)
35045 + kobj_map(bdev_map, devt, range, module, probe, lock, data);
35046 }
35047
35048 EXPORT_SYMBOL(blk_register_region);
35049
35050 +/* undo blk_register_region(), noop if @range is zero */
35051 void blk_unregister_region(dev_t devt, unsigned long range)
35052 {
35053 - kobj_unmap(bdev_map, devt, range);
35054 + if (range)
35055 + kobj_unmap(bdev_map, devt, range);
35056 }
35057
35058 EXPORT_SYMBOL(blk_unregister_region);
35059 diff --git a/block/partitions/efi.c b/block/partitions/efi.c
35060 index a8287b4..241a48e 100644
35061 --- a/block/partitions/efi.c
35062 +++ b/block/partitions/efi.c
35063 @@ -292,14 +292,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
35064 if (!gpt)
35065 return NULL;
35066
35067 + if (!le32_to_cpu(gpt->num_partition_entries))
35068 + return NULL;
35069 + pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
35070 + if (!pte)
35071 + return NULL;
35072 +
35073 count = le32_to_cpu(gpt->num_partition_entries) *
35074 le32_to_cpu(gpt->sizeof_partition_entry);
35075 - if (!count)
35076 - return NULL;
35077 - pte = kmalloc(count, GFP_KERNEL);
35078 - if (!pte)
35079 - return NULL;
35080 -
35081 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
35082 (u8 *) pte, count) < count) {
35083 kfree(pte);
35084 diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
35085 index a5ffcc9..e057498 100644
35086 --- a/block/scsi_ioctl.c
35087 +++ b/block/scsi_ioctl.c
35088 @@ -67,7 +67,7 @@ static int scsi_get_bus(struct request_queue *q, int __user *p)
35089 return put_user(0, p);
35090 }
35091
35092 -static int sg_get_timeout(struct request_queue *q)
35093 +static int __intentional_overflow(-1) sg_get_timeout(struct request_queue *q)
35094 {
35095 return jiffies_to_clock_t(q->sg_timeout);
35096 }
35097 @@ -224,8 +224,20 @@ EXPORT_SYMBOL(blk_verify_command);
35098 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
35099 struct sg_io_hdr *hdr, fmode_t mode)
35100 {
35101 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
35102 + unsigned char tmpcmd[sizeof(rq->__cmd)];
35103 + unsigned char *cmdptr;
35104 +
35105 + if (rq->cmd != rq->__cmd)
35106 + cmdptr = rq->cmd;
35107 + else
35108 + cmdptr = tmpcmd;
35109 +
35110 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
35111 return -EFAULT;
35112 +
35113 + if (cmdptr != rq->cmd)
35114 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
35115 +
35116 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
35117 return -EPERM;
35118
35119 @@ -434,6 +446,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
35120 int err;
35121 unsigned int in_len, out_len, bytes, opcode, cmdlen;
35122 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
35123 + unsigned char tmpcmd[sizeof(rq->__cmd)];
35124 + unsigned char *cmdptr;
35125
35126 if (!sic)
35127 return -EINVAL;
35128 @@ -467,9 +481,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
35129 */
35130 err = -EFAULT;
35131 rq->cmd_len = cmdlen;
35132 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
35133 +
35134 + if (rq->cmd != rq->__cmd)
35135 + cmdptr = rq->cmd;
35136 + else
35137 + cmdptr = tmpcmd;
35138 +
35139 + if (copy_from_user(cmdptr, sic->data, cmdlen))
35140 goto error;
35141
35142 + if (rq->cmd != cmdptr)
35143 + memcpy(rq->cmd, cmdptr, cmdlen);
35144 +
35145 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
35146 goto error;
35147
35148 diff --git a/crypto/cryptd.c b/crypto/cryptd.c
35149 index 7bdd61b..afec999 100644
35150 --- a/crypto/cryptd.c
35151 +++ b/crypto/cryptd.c
35152 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
35153
35154 struct cryptd_blkcipher_request_ctx {
35155 crypto_completion_t complete;
35156 -};
35157 +} __no_const;
35158
35159 struct cryptd_hash_ctx {
35160 struct crypto_shash *child;
35161 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
35162
35163 struct cryptd_aead_request_ctx {
35164 crypto_completion_t complete;
35165 -};
35166 +} __no_const;
35167
35168 static void cryptd_queue_worker(struct work_struct *work);
35169
35170 diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
35171 index f8c920c..ab2cb5a 100644
35172 --- a/crypto/pcrypt.c
35173 +++ b/crypto/pcrypt.c
35174 @@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
35175 int ret;
35176
35177 pinst->kobj.kset = pcrypt_kset;
35178 - ret = kobject_add(&pinst->kobj, NULL, name);
35179 + ret = kobject_add(&pinst->kobj, NULL, "%s", name);
35180 if (!ret)
35181 kobject_uevent(&pinst->kobj, KOBJ_ADD);
35182
35183 diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
35184 index f220d64..d359ad6 100644
35185 --- a/drivers/acpi/apei/apei-internal.h
35186 +++ b/drivers/acpi/apei/apei-internal.h
35187 @@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
35188 struct apei_exec_ins_type {
35189 u32 flags;
35190 apei_exec_ins_func_t run;
35191 -};
35192 +} __do_const;
35193
35194 struct apei_exec_context {
35195 u32 ip;
35196 diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
35197 index 33dc6a0..4b24b47 100644
35198 --- a/drivers/acpi/apei/cper.c
35199 +++ b/drivers/acpi/apei/cper.c
35200 @@ -39,12 +39,12 @@
35201 */
35202 u64 cper_next_record_id(void)
35203 {
35204 - static atomic64_t seq;
35205 + static atomic64_unchecked_t seq;
35206
35207 - if (!atomic64_read(&seq))
35208 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
35209 + if (!atomic64_read_unchecked(&seq))
35210 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
35211
35212 - return atomic64_inc_return(&seq);
35213 + return atomic64_inc_return_unchecked(&seq);
35214 }
35215 EXPORT_SYMBOL_GPL(cper_next_record_id);
35216
35217 diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
35218 index 8ec37bb..b0716e5 100644
35219 --- a/drivers/acpi/apei/ghes.c
35220 +++ b/drivers/acpi/apei/ghes.c
35221 @@ -498,7 +498,7 @@ static void __ghes_print_estatus(const char *pfx,
35222 const struct acpi_hest_generic *generic,
35223 const struct acpi_hest_generic_status *estatus)
35224 {
35225 - static atomic_t seqno;
35226 + static atomic_unchecked_t seqno;
35227 unsigned int curr_seqno;
35228 char pfx_seq[64];
35229
35230 @@ -509,7 +509,7 @@ static void __ghes_print_estatus(const char *pfx,
35231 else
35232 pfx = KERN_ERR;
35233 }
35234 - curr_seqno = atomic_inc_return(&seqno);
35235 + curr_seqno = atomic_inc_return_unchecked(&seqno);
35236 snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
35237 printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
35238 pfx_seq, generic->header.source_id);
35239 diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
35240 index a83e3c6..c3d617f 100644
35241 --- a/drivers/acpi/bgrt.c
35242 +++ b/drivers/acpi/bgrt.c
35243 @@ -86,8 +86,10 @@ static int __init bgrt_init(void)
35244 if (!bgrt_image)
35245 return -ENODEV;
35246
35247 - bin_attr_image.private = bgrt_image;
35248 - bin_attr_image.size = bgrt_image_size;
35249 + pax_open_kernel();
35250 + *(void **)&bin_attr_image.private = bgrt_image;
35251 + *(size_t *)&bin_attr_image.size = bgrt_image_size;
35252 + pax_close_kernel();
35253
35254 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
35255 if (!bgrt_kobj)
35256 diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
35257 index 9515f18..4b149c9 100644
35258 --- a/drivers/acpi/blacklist.c
35259 +++ b/drivers/acpi/blacklist.c
35260 @@ -52,7 +52,7 @@ struct acpi_blacklist_item {
35261 u32 is_critical_error;
35262 };
35263
35264 -static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
35265 +static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
35266
35267 /*
35268 * POLICY: If *anything* doesn't work, put it on the blacklist.
35269 @@ -199,7 +199,7 @@ static int __init dmi_disable_osi_win8(const struct dmi_system_id *d)
35270 return 0;
35271 }
35272
35273 -static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
35274 +static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
35275 {
35276 .callback = dmi_disable_osi_vista,
35277 .ident = "Fujitsu Siemens",
35278 diff --git a/drivers/acpi/custom_method.c b/drivers/acpi/custom_method.c
35279 index 12b62f2..dc2aac8 100644
35280 --- a/drivers/acpi/custom_method.c
35281 +++ b/drivers/acpi/custom_method.c
35282 @@ -29,6 +29,10 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf,
35283 struct acpi_table_header table;
35284 acpi_status status;
35285
35286 +#ifdef CONFIG_GRKERNSEC_KMEM
35287 + return -EPERM;
35288 +#endif
35289 +
35290 if (!(*ppos)) {
35291 /* parse the table header to get the table length */
35292 if (count <= sizeof(struct acpi_table_header))
35293 diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
35294 index c7414a5..d5afd71 100644
35295 --- a/drivers/acpi/processor_idle.c
35296 +++ b/drivers/acpi/processor_idle.c
35297 @@ -966,7 +966,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
35298 {
35299 int i, count = CPUIDLE_DRIVER_STATE_START;
35300 struct acpi_processor_cx *cx;
35301 - struct cpuidle_state *state;
35302 + cpuidle_state_no_const *state;
35303 struct cpuidle_driver *drv = &acpi_idle_driver;
35304
35305 if (!pr->flags.power_setup_done)
35306 diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
35307 index 05306a5..733d1f0 100644
35308 --- a/drivers/acpi/sysfs.c
35309 +++ b/drivers/acpi/sysfs.c
35310 @@ -423,11 +423,11 @@ static u32 num_counters;
35311 static struct attribute **all_attrs;
35312 static u32 acpi_gpe_count;
35313
35314 -static struct attribute_group interrupt_stats_attr_group = {
35315 +static attribute_group_no_const interrupt_stats_attr_group = {
35316 .name = "interrupts",
35317 };
35318
35319 -static struct kobj_attribute *counter_attrs;
35320 +static kobj_attribute_no_const *counter_attrs;
35321
35322 static void delete_gpe_attr_array(void)
35323 {
35324 diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
35325 index cfb7447..98f2149 100644
35326 --- a/drivers/ata/libahci.c
35327 +++ b/drivers/ata/libahci.c
35328 @@ -1239,7 +1239,7 @@ int ahci_kick_engine(struct ata_port *ap)
35329 }
35330 EXPORT_SYMBOL_GPL(ahci_kick_engine);
35331
35332 -static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
35333 +static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
35334 struct ata_taskfile *tf, int is_cmd, u16 flags,
35335 unsigned long timeout_msec)
35336 {
35337 diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
35338 index 81a94a3..b711c74 100644
35339 --- a/drivers/ata/libata-core.c
35340 +++ b/drivers/ata/libata-core.c
35341 @@ -98,7 +98,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
35342 static void ata_dev_xfermask(struct ata_device *dev);
35343 static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
35344
35345 -atomic_t ata_print_id = ATOMIC_INIT(0);
35346 +atomic_unchecked_t ata_print_id = ATOMIC_INIT(0);
35347
35348 struct ata_force_param {
35349 const char *name;
35350 @@ -4809,7 +4809,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
35351 struct ata_port *ap;
35352 unsigned int tag;
35353
35354 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
35355 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
35356 ap = qc->ap;
35357
35358 qc->flags = 0;
35359 @@ -4825,7 +4825,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
35360 struct ata_port *ap;
35361 struct ata_link *link;
35362
35363 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
35364 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
35365 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
35366 ap = qc->ap;
35367 link = qc->dev->link;
35368 @@ -5944,6 +5944,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
35369 return;
35370
35371 spin_lock(&lock);
35372 + pax_open_kernel();
35373
35374 for (cur = ops->inherits; cur; cur = cur->inherits) {
35375 void **inherit = (void **)cur;
35376 @@ -5957,8 +5958,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
35377 if (IS_ERR(*pp))
35378 *pp = NULL;
35379
35380 - ops->inherits = NULL;
35381 + *(struct ata_port_operations **)&ops->inherits = NULL;
35382
35383 + pax_close_kernel();
35384 spin_unlock(&lock);
35385 }
35386
35387 @@ -6151,7 +6153,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
35388
35389 /* give ports names and add SCSI hosts */
35390 for (i = 0; i < host->n_ports; i++) {
35391 - host->ports[i]->print_id = atomic_inc_return(&ata_print_id);
35392 + host->ports[i]->print_id = atomic_inc_return_unchecked(&ata_print_id);
35393 host->ports[i]->local_port_no = i + 1;
35394 }
35395
35396 diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
35397 index ab58556..ed19dd2 100644
35398 --- a/drivers/ata/libata-scsi.c
35399 +++ b/drivers/ata/libata-scsi.c
35400 @@ -4114,7 +4114,7 @@ int ata_sas_port_init(struct ata_port *ap)
35401
35402 if (rc)
35403 return rc;
35404 - ap->print_id = atomic_inc_return(&ata_print_id);
35405 + ap->print_id = atomic_inc_return_unchecked(&ata_print_id);
35406 return 0;
35407 }
35408 EXPORT_SYMBOL_GPL(ata_sas_port_init);
35409 diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
35410 index 45b5ab3..98446b8 100644
35411 --- a/drivers/ata/libata.h
35412 +++ b/drivers/ata/libata.h
35413 @@ -53,7 +53,7 @@ enum {
35414 ATA_DNXFER_QUIET = (1 << 31),
35415 };
35416
35417 -extern atomic_t ata_print_id;
35418 +extern atomic_unchecked_t ata_print_id;
35419 extern int atapi_passthru16;
35420 extern int libata_fua;
35421 extern int libata_noacpi;
35422 diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
35423 index 853f610..97d24da 100644
35424 --- a/drivers/ata/pata_arasan_cf.c
35425 +++ b/drivers/ata/pata_arasan_cf.c
35426 @@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
35427 /* Handle platform specific quirks */
35428 if (quirk) {
35429 if (quirk & CF_BROKEN_PIO) {
35430 - ap->ops->set_piomode = NULL;
35431 + pax_open_kernel();
35432 + *(void **)&ap->ops->set_piomode = NULL;
35433 + pax_close_kernel();
35434 ap->pio_mask = 0;
35435 }
35436 if (quirk & CF_BROKEN_MWDMA)
35437 diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
35438 index f9b983a..887b9d8 100644
35439 --- a/drivers/atm/adummy.c
35440 +++ b/drivers/atm/adummy.c
35441 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
35442 vcc->pop(vcc, skb);
35443 else
35444 dev_kfree_skb_any(skb);
35445 - atomic_inc(&vcc->stats->tx);
35446 + atomic_inc_unchecked(&vcc->stats->tx);
35447
35448 return 0;
35449 }
35450 diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
35451 index 62a7607..cc4be104 100644
35452 --- a/drivers/atm/ambassador.c
35453 +++ b/drivers/atm/ambassador.c
35454 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
35455 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
35456
35457 // VC layer stats
35458 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
35459 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
35460
35461 // free the descriptor
35462 kfree (tx_descr);
35463 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
35464 dump_skb ("<<<", vc, skb);
35465
35466 // VC layer stats
35467 - atomic_inc(&atm_vcc->stats->rx);
35468 + atomic_inc_unchecked(&atm_vcc->stats->rx);
35469 __net_timestamp(skb);
35470 // end of our responsibility
35471 atm_vcc->push (atm_vcc, skb);
35472 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
35473 } else {
35474 PRINTK (KERN_INFO, "dropped over-size frame");
35475 // should we count this?
35476 - atomic_inc(&atm_vcc->stats->rx_drop);
35477 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35478 }
35479
35480 } else {
35481 @@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
35482 }
35483
35484 if (check_area (skb->data, skb->len)) {
35485 - atomic_inc(&atm_vcc->stats->tx_err);
35486 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
35487 return -ENOMEM; // ?
35488 }
35489
35490 diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
35491 index 0e3f8f9..765a7a5 100644
35492 --- a/drivers/atm/atmtcp.c
35493 +++ b/drivers/atm/atmtcp.c
35494 @@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
35495 if (vcc->pop) vcc->pop(vcc,skb);
35496 else dev_kfree_skb(skb);
35497 if (dev_data) return 0;
35498 - atomic_inc(&vcc->stats->tx_err);
35499 + atomic_inc_unchecked(&vcc->stats->tx_err);
35500 return -ENOLINK;
35501 }
35502 size = skb->len+sizeof(struct atmtcp_hdr);
35503 @@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
35504 if (!new_skb) {
35505 if (vcc->pop) vcc->pop(vcc,skb);
35506 else dev_kfree_skb(skb);
35507 - atomic_inc(&vcc->stats->tx_err);
35508 + atomic_inc_unchecked(&vcc->stats->tx_err);
35509 return -ENOBUFS;
35510 }
35511 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
35512 @@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
35513 if (vcc->pop) vcc->pop(vcc,skb);
35514 else dev_kfree_skb(skb);
35515 out_vcc->push(out_vcc,new_skb);
35516 - atomic_inc(&vcc->stats->tx);
35517 - atomic_inc(&out_vcc->stats->rx);
35518 + atomic_inc_unchecked(&vcc->stats->tx);
35519 + atomic_inc_unchecked(&out_vcc->stats->rx);
35520 return 0;
35521 }
35522
35523 @@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
35524 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
35525 read_unlock(&vcc_sklist_lock);
35526 if (!out_vcc) {
35527 - atomic_inc(&vcc->stats->tx_err);
35528 + atomic_inc_unchecked(&vcc->stats->tx_err);
35529 goto done;
35530 }
35531 skb_pull(skb,sizeof(struct atmtcp_hdr));
35532 @@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
35533 __net_timestamp(new_skb);
35534 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
35535 out_vcc->push(out_vcc,new_skb);
35536 - atomic_inc(&vcc->stats->tx);
35537 - atomic_inc(&out_vcc->stats->rx);
35538 + atomic_inc_unchecked(&vcc->stats->tx);
35539 + atomic_inc_unchecked(&out_vcc->stats->rx);
35540 done:
35541 if (vcc->pop) vcc->pop(vcc,skb);
35542 else dev_kfree_skb(skb);
35543 diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
35544 index b1955ba..b179940 100644
35545 --- a/drivers/atm/eni.c
35546 +++ b/drivers/atm/eni.c
35547 @@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
35548 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
35549 vcc->dev->number);
35550 length = 0;
35551 - atomic_inc(&vcc->stats->rx_err);
35552 + atomic_inc_unchecked(&vcc->stats->rx_err);
35553 }
35554 else {
35555 length = ATM_CELL_SIZE-1; /* no HEC */
35556 @@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
35557 size);
35558 }
35559 eff = length = 0;
35560 - atomic_inc(&vcc->stats->rx_err);
35561 + atomic_inc_unchecked(&vcc->stats->rx_err);
35562 }
35563 else {
35564 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
35565 @@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
35566 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
35567 vcc->dev->number,vcc->vci,length,size << 2,descr);
35568 length = eff = 0;
35569 - atomic_inc(&vcc->stats->rx_err);
35570 + atomic_inc_unchecked(&vcc->stats->rx_err);
35571 }
35572 }
35573 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
35574 @@ -767,7 +767,7 @@ rx_dequeued++;
35575 vcc->push(vcc,skb);
35576 pushed++;
35577 }
35578 - atomic_inc(&vcc->stats->rx);
35579 + atomic_inc_unchecked(&vcc->stats->rx);
35580 }
35581 wake_up(&eni_dev->rx_wait);
35582 }
35583 @@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
35584 PCI_DMA_TODEVICE);
35585 if (vcc->pop) vcc->pop(vcc,skb);
35586 else dev_kfree_skb_irq(skb);
35587 - atomic_inc(&vcc->stats->tx);
35588 + atomic_inc_unchecked(&vcc->stats->tx);
35589 wake_up(&eni_dev->tx_wait);
35590 dma_complete++;
35591 }
35592 diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
35593 index b41c948..a002b17 100644
35594 --- a/drivers/atm/firestream.c
35595 +++ b/drivers/atm/firestream.c
35596 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
35597 }
35598 }
35599
35600 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
35601 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
35602
35603 fs_dprintk (FS_DEBUG_TXMEM, "i");
35604 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
35605 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
35606 #endif
35607 skb_put (skb, qe->p1 & 0xffff);
35608 ATM_SKB(skb)->vcc = atm_vcc;
35609 - atomic_inc(&atm_vcc->stats->rx);
35610 + atomic_inc_unchecked(&atm_vcc->stats->rx);
35611 __net_timestamp(skb);
35612 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
35613 atm_vcc->push (atm_vcc, skb);
35614 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
35615 kfree (pe);
35616 }
35617 if (atm_vcc)
35618 - atomic_inc(&atm_vcc->stats->rx_drop);
35619 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35620 break;
35621 case 0x1f: /* Reassembly abort: no buffers. */
35622 /* Silently increment error counter. */
35623 if (atm_vcc)
35624 - atomic_inc(&atm_vcc->stats->rx_drop);
35625 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
35626 break;
35627 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
35628 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
35629 diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
35630 index 204814e..cede831 100644
35631 --- a/drivers/atm/fore200e.c
35632 +++ b/drivers/atm/fore200e.c
35633 @@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
35634 #endif
35635 /* check error condition */
35636 if (*entry->status & STATUS_ERROR)
35637 - atomic_inc(&vcc->stats->tx_err);
35638 + atomic_inc_unchecked(&vcc->stats->tx_err);
35639 else
35640 - atomic_inc(&vcc->stats->tx);
35641 + atomic_inc_unchecked(&vcc->stats->tx);
35642 }
35643 }
35644
35645 @@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
35646 if (skb == NULL) {
35647 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
35648
35649 - atomic_inc(&vcc->stats->rx_drop);
35650 + atomic_inc_unchecked(&vcc->stats->rx_drop);
35651 return -ENOMEM;
35652 }
35653
35654 @@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
35655
35656 dev_kfree_skb_any(skb);
35657
35658 - atomic_inc(&vcc->stats->rx_drop);
35659 + atomic_inc_unchecked(&vcc->stats->rx_drop);
35660 return -ENOMEM;
35661 }
35662
35663 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
35664
35665 vcc->push(vcc, skb);
35666 - atomic_inc(&vcc->stats->rx);
35667 + atomic_inc_unchecked(&vcc->stats->rx);
35668
35669 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
35670
35671 @@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
35672 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
35673 fore200e->atm_dev->number,
35674 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
35675 - atomic_inc(&vcc->stats->rx_err);
35676 + atomic_inc_unchecked(&vcc->stats->rx_err);
35677 }
35678 }
35679
35680 @@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
35681 goto retry_here;
35682 }
35683
35684 - atomic_inc(&vcc->stats->tx_err);
35685 + atomic_inc_unchecked(&vcc->stats->tx_err);
35686
35687 fore200e->tx_sat++;
35688 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
35689 diff --git a/drivers/atm/he.c b/drivers/atm/he.c
35690 index 8557adc..3fb5d55 100644
35691 --- a/drivers/atm/he.c
35692 +++ b/drivers/atm/he.c
35693 @@ -1691,7 +1691,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35694
35695 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
35696 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
35697 - atomic_inc(&vcc->stats->rx_drop);
35698 + atomic_inc_unchecked(&vcc->stats->rx_drop);
35699 goto return_host_buffers;
35700 }
35701
35702 @@ -1718,7 +1718,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35703 RBRQ_LEN_ERR(he_dev->rbrq_head)
35704 ? "LEN_ERR" : "",
35705 vcc->vpi, vcc->vci);
35706 - atomic_inc(&vcc->stats->rx_err);
35707 + atomic_inc_unchecked(&vcc->stats->rx_err);
35708 goto return_host_buffers;
35709 }
35710
35711 @@ -1770,7 +1770,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
35712 vcc->push(vcc, skb);
35713 spin_lock(&he_dev->global_lock);
35714
35715 - atomic_inc(&vcc->stats->rx);
35716 + atomic_inc_unchecked(&vcc->stats->rx);
35717
35718 return_host_buffers:
35719 ++pdus_assembled;
35720 @@ -2096,7 +2096,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
35721 tpd->vcc->pop(tpd->vcc, tpd->skb);
35722 else
35723 dev_kfree_skb_any(tpd->skb);
35724 - atomic_inc(&tpd->vcc->stats->tx_err);
35725 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
35726 }
35727 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
35728 return;
35729 @@ -2508,7 +2508,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35730 vcc->pop(vcc, skb);
35731 else
35732 dev_kfree_skb_any(skb);
35733 - atomic_inc(&vcc->stats->tx_err);
35734 + atomic_inc_unchecked(&vcc->stats->tx_err);
35735 return -EINVAL;
35736 }
35737
35738 @@ -2519,7 +2519,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35739 vcc->pop(vcc, skb);
35740 else
35741 dev_kfree_skb_any(skb);
35742 - atomic_inc(&vcc->stats->tx_err);
35743 + atomic_inc_unchecked(&vcc->stats->tx_err);
35744 return -EINVAL;
35745 }
35746 #endif
35747 @@ -2531,7 +2531,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35748 vcc->pop(vcc, skb);
35749 else
35750 dev_kfree_skb_any(skb);
35751 - atomic_inc(&vcc->stats->tx_err);
35752 + atomic_inc_unchecked(&vcc->stats->tx_err);
35753 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35754 return -ENOMEM;
35755 }
35756 @@ -2573,7 +2573,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35757 vcc->pop(vcc, skb);
35758 else
35759 dev_kfree_skb_any(skb);
35760 - atomic_inc(&vcc->stats->tx_err);
35761 + atomic_inc_unchecked(&vcc->stats->tx_err);
35762 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35763 return -ENOMEM;
35764 }
35765 @@ -2604,7 +2604,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
35766 __enqueue_tpd(he_dev, tpd, cid);
35767 spin_unlock_irqrestore(&he_dev->global_lock, flags);
35768
35769 - atomic_inc(&vcc->stats->tx);
35770 + atomic_inc_unchecked(&vcc->stats->tx);
35771
35772 return 0;
35773 }
35774 diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
35775 index 1dc0519..1aadaf7 100644
35776 --- a/drivers/atm/horizon.c
35777 +++ b/drivers/atm/horizon.c
35778 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
35779 {
35780 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
35781 // VC layer stats
35782 - atomic_inc(&vcc->stats->rx);
35783 + atomic_inc_unchecked(&vcc->stats->rx);
35784 __net_timestamp(skb);
35785 // end of our responsibility
35786 vcc->push (vcc, skb);
35787 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
35788 dev->tx_iovec = NULL;
35789
35790 // VC layer stats
35791 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
35792 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
35793
35794 // free the skb
35795 hrz_kfree_skb (skb);
35796 diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
35797 index 1bdf104..9dc44b1 100644
35798 --- a/drivers/atm/idt77252.c
35799 +++ b/drivers/atm/idt77252.c
35800 @@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
35801 else
35802 dev_kfree_skb(skb);
35803
35804 - atomic_inc(&vcc->stats->tx);
35805 + atomic_inc_unchecked(&vcc->stats->tx);
35806 }
35807
35808 atomic_dec(&scq->used);
35809 @@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35810 if ((sb = dev_alloc_skb(64)) == NULL) {
35811 printk("%s: Can't allocate buffers for aal0.\n",
35812 card->name);
35813 - atomic_add(i, &vcc->stats->rx_drop);
35814 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
35815 break;
35816 }
35817 if (!atm_charge(vcc, sb->truesize)) {
35818 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
35819 card->name);
35820 - atomic_add(i - 1, &vcc->stats->rx_drop);
35821 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
35822 dev_kfree_skb(sb);
35823 break;
35824 }
35825 @@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35826 ATM_SKB(sb)->vcc = vcc;
35827 __net_timestamp(sb);
35828 vcc->push(vcc, sb);
35829 - atomic_inc(&vcc->stats->rx);
35830 + atomic_inc_unchecked(&vcc->stats->rx);
35831
35832 cell += ATM_CELL_PAYLOAD;
35833 }
35834 @@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35835 "(CDC: %08x)\n",
35836 card->name, len, rpp->len, readl(SAR_REG_CDC));
35837 recycle_rx_pool_skb(card, rpp);
35838 - atomic_inc(&vcc->stats->rx_err);
35839 + atomic_inc_unchecked(&vcc->stats->rx_err);
35840 return;
35841 }
35842 if (stat & SAR_RSQE_CRC) {
35843 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
35844 recycle_rx_pool_skb(card, rpp);
35845 - atomic_inc(&vcc->stats->rx_err);
35846 + atomic_inc_unchecked(&vcc->stats->rx_err);
35847 return;
35848 }
35849 if (skb_queue_len(&rpp->queue) > 1) {
35850 @@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35851 RXPRINTK("%s: Can't alloc RX skb.\n",
35852 card->name);
35853 recycle_rx_pool_skb(card, rpp);
35854 - atomic_inc(&vcc->stats->rx_err);
35855 + atomic_inc_unchecked(&vcc->stats->rx_err);
35856 return;
35857 }
35858 if (!atm_charge(vcc, skb->truesize)) {
35859 @@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35860 __net_timestamp(skb);
35861
35862 vcc->push(vcc, skb);
35863 - atomic_inc(&vcc->stats->rx);
35864 + atomic_inc_unchecked(&vcc->stats->rx);
35865
35866 return;
35867 }
35868 @@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
35869 __net_timestamp(skb);
35870
35871 vcc->push(vcc, skb);
35872 - atomic_inc(&vcc->stats->rx);
35873 + atomic_inc_unchecked(&vcc->stats->rx);
35874
35875 if (skb->truesize > SAR_FB_SIZE_3)
35876 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
35877 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
35878 if (vcc->qos.aal != ATM_AAL0) {
35879 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
35880 card->name, vpi, vci);
35881 - atomic_inc(&vcc->stats->rx_drop);
35882 + atomic_inc_unchecked(&vcc->stats->rx_drop);
35883 goto drop;
35884 }
35885
35886 if ((sb = dev_alloc_skb(64)) == NULL) {
35887 printk("%s: Can't allocate buffers for AAL0.\n",
35888 card->name);
35889 - atomic_inc(&vcc->stats->rx_err);
35890 + atomic_inc_unchecked(&vcc->stats->rx_err);
35891 goto drop;
35892 }
35893
35894 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
35895 ATM_SKB(sb)->vcc = vcc;
35896 __net_timestamp(sb);
35897 vcc->push(vcc, sb);
35898 - atomic_inc(&vcc->stats->rx);
35899 + atomic_inc_unchecked(&vcc->stats->rx);
35900
35901 drop:
35902 skb_pull(queue, 64);
35903 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
35904
35905 if (vc == NULL) {
35906 printk("%s: NULL connection in send().\n", card->name);
35907 - atomic_inc(&vcc->stats->tx_err);
35908 + atomic_inc_unchecked(&vcc->stats->tx_err);
35909 dev_kfree_skb(skb);
35910 return -EINVAL;
35911 }
35912 if (!test_bit(VCF_TX, &vc->flags)) {
35913 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
35914 - atomic_inc(&vcc->stats->tx_err);
35915 + atomic_inc_unchecked(&vcc->stats->tx_err);
35916 dev_kfree_skb(skb);
35917 return -EINVAL;
35918 }
35919 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
35920 break;
35921 default:
35922 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
35923 - atomic_inc(&vcc->stats->tx_err);
35924 + atomic_inc_unchecked(&vcc->stats->tx_err);
35925 dev_kfree_skb(skb);
35926 return -EINVAL;
35927 }
35928
35929 if (skb_shinfo(skb)->nr_frags != 0) {
35930 printk("%s: No scatter-gather yet.\n", card->name);
35931 - atomic_inc(&vcc->stats->tx_err);
35932 + atomic_inc_unchecked(&vcc->stats->tx_err);
35933 dev_kfree_skb(skb);
35934 return -EINVAL;
35935 }
35936 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
35937
35938 err = queue_skb(card, vc, skb, oam);
35939 if (err) {
35940 - atomic_inc(&vcc->stats->tx_err);
35941 + atomic_inc_unchecked(&vcc->stats->tx_err);
35942 dev_kfree_skb(skb);
35943 return err;
35944 }
35945 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
35946 skb = dev_alloc_skb(64);
35947 if (!skb) {
35948 printk("%s: Out of memory in send_oam().\n", card->name);
35949 - atomic_inc(&vcc->stats->tx_err);
35950 + atomic_inc_unchecked(&vcc->stats->tx_err);
35951 return -ENOMEM;
35952 }
35953 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
35954 diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
35955 index 4217f29..88f547a 100644
35956 --- a/drivers/atm/iphase.c
35957 +++ b/drivers/atm/iphase.c
35958 @@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
35959 status = (u_short) (buf_desc_ptr->desc_mode);
35960 if (status & (RX_CER | RX_PTE | RX_OFL))
35961 {
35962 - atomic_inc(&vcc->stats->rx_err);
35963 + atomic_inc_unchecked(&vcc->stats->rx_err);
35964 IF_ERR(printk("IA: bad packet, dropping it");)
35965 if (status & RX_CER) {
35966 IF_ERR(printk(" cause: packet CRC error\n");)
35967 @@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
35968 len = dma_addr - buf_addr;
35969 if (len > iadev->rx_buf_sz) {
35970 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
35971 - atomic_inc(&vcc->stats->rx_err);
35972 + atomic_inc_unchecked(&vcc->stats->rx_err);
35973 goto out_free_desc;
35974 }
35975
35976 @@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
35977 ia_vcc = INPH_IA_VCC(vcc);
35978 if (ia_vcc == NULL)
35979 {
35980 - atomic_inc(&vcc->stats->rx_err);
35981 + atomic_inc_unchecked(&vcc->stats->rx_err);
35982 atm_return(vcc, skb->truesize);
35983 dev_kfree_skb_any(skb);
35984 goto INCR_DLE;
35985 @@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
35986 if ((length > iadev->rx_buf_sz) || (length >
35987 (skb->len - sizeof(struct cpcs_trailer))))
35988 {
35989 - atomic_inc(&vcc->stats->rx_err);
35990 + atomic_inc_unchecked(&vcc->stats->rx_err);
35991 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
35992 length, skb->len);)
35993 atm_return(vcc, skb->truesize);
35994 @@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
35995
35996 IF_RX(printk("rx_dle_intr: skb push");)
35997 vcc->push(vcc,skb);
35998 - atomic_inc(&vcc->stats->rx);
35999 + atomic_inc_unchecked(&vcc->stats->rx);
36000 iadev->rx_pkt_cnt++;
36001 }
36002 INCR_DLE:
36003 @@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
36004 {
36005 struct k_sonet_stats *stats;
36006 stats = &PRIV(_ia_dev[board])->sonet_stats;
36007 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
36008 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
36009 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
36010 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
36011 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
36012 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
36013 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
36014 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
36015 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
36016 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
36017 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
36018 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
36019 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
36020 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
36021 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
36022 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
36023 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
36024 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
36025 }
36026 ia_cmds.status = 0;
36027 break;
36028 @@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
36029 if ((desc == 0) || (desc > iadev->num_tx_desc))
36030 {
36031 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
36032 - atomic_inc(&vcc->stats->tx);
36033 + atomic_inc_unchecked(&vcc->stats->tx);
36034 if (vcc->pop)
36035 vcc->pop(vcc, skb);
36036 else
36037 @@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
36038 ATM_DESC(skb) = vcc->vci;
36039 skb_queue_tail(&iadev->tx_dma_q, skb);
36040
36041 - atomic_inc(&vcc->stats->tx);
36042 + atomic_inc_unchecked(&vcc->stats->tx);
36043 iadev->tx_pkt_cnt++;
36044 /* Increment transaction counter */
36045 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
36046
36047 #if 0
36048 /* add flow control logic */
36049 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
36050 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
36051 if (iavcc->vc_desc_cnt > 10) {
36052 vcc->tx_quota = vcc->tx_quota * 3 / 4;
36053 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
36054 diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
36055 index fa7d701..1e404c7 100644
36056 --- a/drivers/atm/lanai.c
36057 +++ b/drivers/atm/lanai.c
36058 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
36059 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
36060 lanai_endtx(lanai, lvcc);
36061 lanai_free_skb(lvcc->tx.atmvcc, skb);
36062 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
36063 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
36064 }
36065
36066 /* Try to fill the buffer - don't call unless there is backlog */
36067 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
36068 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
36069 __net_timestamp(skb);
36070 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
36071 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
36072 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
36073 out:
36074 lvcc->rx.buf.ptr = end;
36075 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
36076 @@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36077 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
36078 "vcc %d\n", lanai->number, (unsigned int) s, vci);
36079 lanai->stats.service_rxnotaal5++;
36080 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36081 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36082 return 0;
36083 }
36084 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
36085 @@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36086 int bytes;
36087 read_unlock(&vcc_sklist_lock);
36088 DPRINTK("got trashed rx pdu on vci %d\n", vci);
36089 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36090 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36091 lvcc->stats.x.aal5.service_trash++;
36092 bytes = (SERVICE_GET_END(s) * 16) -
36093 (((unsigned long) lvcc->rx.buf.ptr) -
36094 @@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36095 }
36096 if (s & SERVICE_STREAM) {
36097 read_unlock(&vcc_sklist_lock);
36098 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36099 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36100 lvcc->stats.x.aal5.service_stream++;
36101 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
36102 "PDU on VCI %d!\n", lanai->number, vci);
36103 @@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
36104 return 0;
36105 }
36106 DPRINTK("got rx crc error on vci %d\n", vci);
36107 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
36108 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
36109 lvcc->stats.x.aal5.service_rxcrc++;
36110 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
36111 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
36112 diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
36113 index 5aca5f4..ce3a6b0 100644
36114 --- a/drivers/atm/nicstar.c
36115 +++ b/drivers/atm/nicstar.c
36116 @@ -1640,7 +1640,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36117 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
36118 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
36119 card->index);
36120 - atomic_inc(&vcc->stats->tx_err);
36121 + atomic_inc_unchecked(&vcc->stats->tx_err);
36122 dev_kfree_skb_any(skb);
36123 return -EINVAL;
36124 }
36125 @@ -1648,7 +1648,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36126 if (!vc->tx) {
36127 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
36128 card->index);
36129 - atomic_inc(&vcc->stats->tx_err);
36130 + atomic_inc_unchecked(&vcc->stats->tx_err);
36131 dev_kfree_skb_any(skb);
36132 return -EINVAL;
36133 }
36134 @@ -1656,14 +1656,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36135 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
36136 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
36137 card->index);
36138 - atomic_inc(&vcc->stats->tx_err);
36139 + atomic_inc_unchecked(&vcc->stats->tx_err);
36140 dev_kfree_skb_any(skb);
36141 return -EINVAL;
36142 }
36143
36144 if (skb_shinfo(skb)->nr_frags != 0) {
36145 printk("nicstar%d: No scatter-gather yet.\n", card->index);
36146 - atomic_inc(&vcc->stats->tx_err);
36147 + atomic_inc_unchecked(&vcc->stats->tx_err);
36148 dev_kfree_skb_any(skb);
36149 return -EINVAL;
36150 }
36151 @@ -1711,11 +1711,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
36152 }
36153
36154 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
36155 - atomic_inc(&vcc->stats->tx_err);
36156 + atomic_inc_unchecked(&vcc->stats->tx_err);
36157 dev_kfree_skb_any(skb);
36158 return -EIO;
36159 }
36160 - atomic_inc(&vcc->stats->tx);
36161 + atomic_inc_unchecked(&vcc->stats->tx);
36162
36163 return 0;
36164 }
36165 @@ -2032,14 +2032,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36166 printk
36167 ("nicstar%d: Can't allocate buffers for aal0.\n",
36168 card->index);
36169 - atomic_add(i, &vcc->stats->rx_drop);
36170 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
36171 break;
36172 }
36173 if (!atm_charge(vcc, sb->truesize)) {
36174 RXPRINTK
36175 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
36176 card->index);
36177 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
36178 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
36179 dev_kfree_skb_any(sb);
36180 break;
36181 }
36182 @@ -2054,7 +2054,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36183 ATM_SKB(sb)->vcc = vcc;
36184 __net_timestamp(sb);
36185 vcc->push(vcc, sb);
36186 - atomic_inc(&vcc->stats->rx);
36187 + atomic_inc_unchecked(&vcc->stats->rx);
36188 cell += ATM_CELL_PAYLOAD;
36189 }
36190
36191 @@ -2071,7 +2071,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36192 if (iovb == NULL) {
36193 printk("nicstar%d: Out of iovec buffers.\n",
36194 card->index);
36195 - atomic_inc(&vcc->stats->rx_drop);
36196 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36197 recycle_rx_buf(card, skb);
36198 return;
36199 }
36200 @@ -2095,7 +2095,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36201 small or large buffer itself. */
36202 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
36203 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
36204 - atomic_inc(&vcc->stats->rx_err);
36205 + atomic_inc_unchecked(&vcc->stats->rx_err);
36206 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
36207 NS_MAX_IOVECS);
36208 NS_PRV_IOVCNT(iovb) = 0;
36209 @@ -2115,7 +2115,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36210 ("nicstar%d: Expected a small buffer, and this is not one.\n",
36211 card->index);
36212 which_list(card, skb);
36213 - atomic_inc(&vcc->stats->rx_err);
36214 + atomic_inc_unchecked(&vcc->stats->rx_err);
36215 recycle_rx_buf(card, skb);
36216 vc->rx_iov = NULL;
36217 recycle_iov_buf(card, iovb);
36218 @@ -2128,7 +2128,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36219 ("nicstar%d: Expected a large buffer, and this is not one.\n",
36220 card->index);
36221 which_list(card, skb);
36222 - atomic_inc(&vcc->stats->rx_err);
36223 + atomic_inc_unchecked(&vcc->stats->rx_err);
36224 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
36225 NS_PRV_IOVCNT(iovb));
36226 vc->rx_iov = NULL;
36227 @@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36228 printk(" - PDU size mismatch.\n");
36229 else
36230 printk(".\n");
36231 - atomic_inc(&vcc->stats->rx_err);
36232 + atomic_inc_unchecked(&vcc->stats->rx_err);
36233 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
36234 NS_PRV_IOVCNT(iovb));
36235 vc->rx_iov = NULL;
36236 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36237 /* skb points to a small buffer */
36238 if (!atm_charge(vcc, skb->truesize)) {
36239 push_rxbufs(card, skb);
36240 - atomic_inc(&vcc->stats->rx_drop);
36241 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36242 } else {
36243 skb_put(skb, len);
36244 dequeue_sm_buf(card, skb);
36245 @@ -2175,7 +2175,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36246 ATM_SKB(skb)->vcc = vcc;
36247 __net_timestamp(skb);
36248 vcc->push(vcc, skb);
36249 - atomic_inc(&vcc->stats->rx);
36250 + atomic_inc_unchecked(&vcc->stats->rx);
36251 }
36252 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
36253 struct sk_buff *sb;
36254 @@ -2186,7 +2186,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36255 if (len <= NS_SMBUFSIZE) {
36256 if (!atm_charge(vcc, sb->truesize)) {
36257 push_rxbufs(card, sb);
36258 - atomic_inc(&vcc->stats->rx_drop);
36259 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36260 } else {
36261 skb_put(sb, len);
36262 dequeue_sm_buf(card, sb);
36263 @@ -2196,7 +2196,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36264 ATM_SKB(sb)->vcc = vcc;
36265 __net_timestamp(sb);
36266 vcc->push(vcc, sb);
36267 - atomic_inc(&vcc->stats->rx);
36268 + atomic_inc_unchecked(&vcc->stats->rx);
36269 }
36270
36271 push_rxbufs(card, skb);
36272 @@ -2205,7 +2205,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36273
36274 if (!atm_charge(vcc, skb->truesize)) {
36275 push_rxbufs(card, skb);
36276 - atomic_inc(&vcc->stats->rx_drop);
36277 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36278 } else {
36279 dequeue_lg_buf(card, skb);
36280 #ifdef NS_USE_DESTRUCTORS
36281 @@ -2218,7 +2218,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36282 ATM_SKB(skb)->vcc = vcc;
36283 __net_timestamp(skb);
36284 vcc->push(vcc, skb);
36285 - atomic_inc(&vcc->stats->rx);
36286 + atomic_inc_unchecked(&vcc->stats->rx);
36287 }
36288
36289 push_rxbufs(card, sb);
36290 @@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36291 printk
36292 ("nicstar%d: Out of huge buffers.\n",
36293 card->index);
36294 - atomic_inc(&vcc->stats->rx_drop);
36295 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36296 recycle_iovec_rx_bufs(card,
36297 (struct iovec *)
36298 iovb->data,
36299 @@ -2290,7 +2290,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36300 card->hbpool.count++;
36301 } else
36302 dev_kfree_skb_any(hb);
36303 - atomic_inc(&vcc->stats->rx_drop);
36304 + atomic_inc_unchecked(&vcc->stats->rx_drop);
36305 } else {
36306 /* Copy the small buffer to the huge buffer */
36307 sb = (struct sk_buff *)iov->iov_base;
36308 @@ -2327,7 +2327,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
36309 #endif /* NS_USE_DESTRUCTORS */
36310 __net_timestamp(hb);
36311 vcc->push(vcc, hb);
36312 - atomic_inc(&vcc->stats->rx);
36313 + atomic_inc_unchecked(&vcc->stats->rx);
36314 }
36315 }
36316
36317 diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
36318 index 32784d1..4a8434a 100644
36319 --- a/drivers/atm/solos-pci.c
36320 +++ b/drivers/atm/solos-pci.c
36321 @@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
36322 }
36323 atm_charge(vcc, skb->truesize);
36324 vcc->push(vcc, skb);
36325 - atomic_inc(&vcc->stats->rx);
36326 + atomic_inc_unchecked(&vcc->stats->rx);
36327 break;
36328
36329 case PKT_STATUS:
36330 @@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
36331 vcc = SKB_CB(oldskb)->vcc;
36332
36333 if (vcc) {
36334 - atomic_inc(&vcc->stats->tx);
36335 + atomic_inc_unchecked(&vcc->stats->tx);
36336 solos_pop(vcc, oldskb);
36337 } else {
36338 dev_kfree_skb_irq(oldskb);
36339 diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
36340 index 0215934..ce9f5b1 100644
36341 --- a/drivers/atm/suni.c
36342 +++ b/drivers/atm/suni.c
36343 @@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
36344
36345
36346 #define ADD_LIMITED(s,v) \
36347 - atomic_add((v),&stats->s); \
36348 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
36349 + atomic_add_unchecked((v),&stats->s); \
36350 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
36351
36352
36353 static void suni_hz(unsigned long from_timer)
36354 diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
36355 index 5120a96..e2572bd 100644
36356 --- a/drivers/atm/uPD98402.c
36357 +++ b/drivers/atm/uPD98402.c
36358 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
36359 struct sonet_stats tmp;
36360 int error = 0;
36361
36362 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
36363 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
36364 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
36365 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
36366 if (zero && !error) {
36367 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
36368
36369
36370 #define ADD_LIMITED(s,v) \
36371 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
36372 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
36373 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
36374 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
36375 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
36376 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
36377
36378
36379 static void stat_event(struct atm_dev *dev)
36380 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
36381 if (reason & uPD98402_INT_PFM) stat_event(dev);
36382 if (reason & uPD98402_INT_PCO) {
36383 (void) GET(PCOCR); /* clear interrupt cause */
36384 - atomic_add(GET(HECCT),
36385 + atomic_add_unchecked(GET(HECCT),
36386 &PRIV(dev)->sonet_stats.uncorr_hcs);
36387 }
36388 if ((reason & uPD98402_INT_RFO) &&
36389 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
36390 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
36391 uPD98402_INT_LOS),PIMR); /* enable them */
36392 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
36393 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
36394 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
36395 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
36396 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
36397 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
36398 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
36399 return 0;
36400 }
36401
36402 diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
36403 index 969c3c2..9b72956 100644
36404 --- a/drivers/atm/zatm.c
36405 +++ b/drivers/atm/zatm.c
36406 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
36407 }
36408 if (!size) {
36409 dev_kfree_skb_irq(skb);
36410 - if (vcc) atomic_inc(&vcc->stats->rx_err);
36411 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
36412 continue;
36413 }
36414 if (!atm_charge(vcc,skb->truesize)) {
36415 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
36416 skb->len = size;
36417 ATM_SKB(skb)->vcc = vcc;
36418 vcc->push(vcc,skb);
36419 - atomic_inc(&vcc->stats->rx);
36420 + atomic_inc_unchecked(&vcc->stats->rx);
36421 }
36422 zout(pos & 0xffff,MTA(mbx));
36423 #if 0 /* probably a stupid idea */
36424 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
36425 skb_queue_head(&zatm_vcc->backlog,skb);
36426 break;
36427 }
36428 - atomic_inc(&vcc->stats->tx);
36429 + atomic_inc_unchecked(&vcc->stats->tx);
36430 wake_up(&zatm_vcc->tx_wait);
36431 }
36432
36433 diff --git a/drivers/base/bus.c b/drivers/base/bus.c
36434 index 4c289ab..de1c333 100644
36435 --- a/drivers/base/bus.c
36436 +++ b/drivers/base/bus.c
36437 @@ -1193,7 +1193,7 @@ int subsys_interface_register(struct subsys_interface *sif)
36438 return -EINVAL;
36439
36440 mutex_lock(&subsys->p->mutex);
36441 - list_add_tail(&sif->node, &subsys->p->interfaces);
36442 + pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
36443 if (sif->add_dev) {
36444 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
36445 while ((dev = subsys_dev_iter_next(&iter)))
36446 @@ -1218,7 +1218,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
36447 subsys = sif->subsys;
36448
36449 mutex_lock(&subsys->p->mutex);
36450 - list_del_init(&sif->node);
36451 + pax_list_del_init((struct list_head *)&sif->node);
36452 if (sif->remove_dev) {
36453 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
36454 while ((dev = subsys_dev_iter_next(&iter)))
36455 diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
36456 index 7413d06..79155fa 100644
36457 --- a/drivers/base/devtmpfs.c
36458 +++ b/drivers/base/devtmpfs.c
36459 @@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
36460 if (!thread)
36461 return 0;
36462
36463 - err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
36464 + err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
36465 if (err)
36466 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
36467 else
36468 @@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
36469 *err = sys_unshare(CLONE_NEWNS);
36470 if (*err)
36471 goto out;
36472 - *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
36473 + *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
36474 if (*err)
36475 goto out;
36476 - sys_chdir("/.."); /* will traverse into overmounted root */
36477 - sys_chroot(".");
36478 + sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
36479 + sys_chroot((char __force_user *)".");
36480 complete(&setup_done);
36481 while (1) {
36482 spin_lock(&req_lock);
36483 diff --git a/drivers/base/node.c b/drivers/base/node.c
36484 index bc9f43b..29703b8 100644
36485 --- a/drivers/base/node.c
36486 +++ b/drivers/base/node.c
36487 @@ -620,7 +620,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
36488 struct node_attr {
36489 struct device_attribute attr;
36490 enum node_states state;
36491 -};
36492 +} __do_const;
36493
36494 static ssize_t show_node_state(struct device *dev,
36495 struct device_attribute *attr, char *buf)
36496 diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
36497 index bfb8955..42c9b9a 100644
36498 --- a/drivers/base/power/domain.c
36499 +++ b/drivers/base/power/domain.c
36500 @@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
36501 {
36502 struct cpuidle_driver *cpuidle_drv;
36503 struct gpd_cpu_data *cpu_data;
36504 - struct cpuidle_state *idle_state;
36505 + cpuidle_state_no_const *idle_state;
36506 int ret = 0;
36507
36508 if (IS_ERR_OR_NULL(genpd) || state < 0)
36509 @@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
36510 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
36511 {
36512 struct gpd_cpu_data *cpu_data;
36513 - struct cpuidle_state *idle_state;
36514 + cpuidle_state_no_const *idle_state;
36515 int ret = 0;
36516
36517 if (IS_ERR_OR_NULL(genpd))
36518 diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
36519 index 03e089a..0e9560c 100644
36520 --- a/drivers/base/power/sysfs.c
36521 +++ b/drivers/base/power/sysfs.c
36522 @@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
36523 return -EIO;
36524 }
36525 }
36526 - return sprintf(buf, p);
36527 + return sprintf(buf, "%s", p);
36528 }
36529
36530 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
36531 diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
36532 index 2d56f41..8830f19 100644
36533 --- a/drivers/base/power/wakeup.c
36534 +++ b/drivers/base/power/wakeup.c
36535 @@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
36536 * They need to be modified together atomically, so it's better to use one
36537 * atomic variable to hold them both.
36538 */
36539 -static atomic_t combined_event_count = ATOMIC_INIT(0);
36540 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
36541
36542 #define IN_PROGRESS_BITS (sizeof(int) * 4)
36543 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
36544
36545 static void split_counters(unsigned int *cnt, unsigned int *inpr)
36546 {
36547 - unsigned int comb = atomic_read(&combined_event_count);
36548 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
36549
36550 *cnt = (comb >> IN_PROGRESS_BITS);
36551 *inpr = comb & MAX_IN_PROGRESS;
36552 @@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
36553 ws->start_prevent_time = ws->last_time;
36554
36555 /* Increment the counter of events in progress. */
36556 - cec = atomic_inc_return(&combined_event_count);
36557 + cec = atomic_inc_return_unchecked(&combined_event_count);
36558
36559 trace_wakeup_source_activate(ws->name, cec);
36560 }
36561 @@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
36562 * Increment the counter of registered wakeup events and decrement the
36563 * couter of wakeup events in progress simultaneously.
36564 */
36565 - cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
36566 + cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
36567 trace_wakeup_source_deactivate(ws->name, cec);
36568
36569 split_counters(&cnt, &inpr);
36570 diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
36571 index e8d11b6..7b1b36f 100644
36572 --- a/drivers/base/syscore.c
36573 +++ b/drivers/base/syscore.c
36574 @@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
36575 void register_syscore_ops(struct syscore_ops *ops)
36576 {
36577 mutex_lock(&syscore_ops_lock);
36578 - list_add_tail(&ops->node, &syscore_ops_list);
36579 + pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
36580 mutex_unlock(&syscore_ops_lock);
36581 }
36582 EXPORT_SYMBOL_GPL(register_syscore_ops);
36583 @@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
36584 void unregister_syscore_ops(struct syscore_ops *ops)
36585 {
36586 mutex_lock(&syscore_ops_lock);
36587 - list_del(&ops->node);
36588 + pax_list_del((struct list_head *)&ops->node);
36589 mutex_unlock(&syscore_ops_lock);
36590 }
36591 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
36592 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
36593 index edfa251..1734d4d 100644
36594 --- a/drivers/block/cciss.c
36595 +++ b/drivers/block/cciss.c
36596 @@ -3011,7 +3011,7 @@ static void start_io(ctlr_info_t *h)
36597 while (!list_empty(&h->reqQ)) {
36598 c = list_entry(h->reqQ.next, CommandList_struct, list);
36599 /* can't do anything if fifo is full */
36600 - if ((h->access.fifo_full(h))) {
36601 + if ((h->access->fifo_full(h))) {
36602 dev_warn(&h->pdev->dev, "fifo full\n");
36603 break;
36604 }
36605 @@ -3021,7 +3021,7 @@ static void start_io(ctlr_info_t *h)
36606 h->Qdepth--;
36607
36608 /* Tell the controller execute command */
36609 - h->access.submit_command(h, c);
36610 + h->access->submit_command(h, c);
36611
36612 /* Put job onto the completed Q */
36613 addQ(&h->cmpQ, c);
36614 @@ -3447,17 +3447,17 @@ startio:
36615
36616 static inline unsigned long get_next_completion(ctlr_info_t *h)
36617 {
36618 - return h->access.command_completed(h);
36619 + return h->access->command_completed(h);
36620 }
36621
36622 static inline int interrupt_pending(ctlr_info_t *h)
36623 {
36624 - return h->access.intr_pending(h);
36625 + return h->access->intr_pending(h);
36626 }
36627
36628 static inline long interrupt_not_for_us(ctlr_info_t *h)
36629 {
36630 - return ((h->access.intr_pending(h) == 0) ||
36631 + return ((h->access->intr_pending(h) == 0) ||
36632 (h->interrupts_enabled == 0));
36633 }
36634
36635 @@ -3490,7 +3490,7 @@ static inline u32 next_command(ctlr_info_t *h)
36636 u32 a;
36637
36638 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
36639 - return h->access.command_completed(h);
36640 + return h->access->command_completed(h);
36641
36642 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
36643 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
36644 @@ -4047,7 +4047,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
36645 trans_support & CFGTBL_Trans_use_short_tags);
36646
36647 /* Change the access methods to the performant access methods */
36648 - h->access = SA5_performant_access;
36649 + h->access = &SA5_performant_access;
36650 h->transMethod = CFGTBL_Trans_Performant;
36651
36652 return;
36653 @@ -4327,7 +4327,7 @@ static int cciss_pci_init(ctlr_info_t *h)
36654 if (prod_index < 0)
36655 return -ENODEV;
36656 h->product_name = products[prod_index].product_name;
36657 - h->access = *(products[prod_index].access);
36658 + h->access = products[prod_index].access;
36659
36660 if (cciss_board_disabled(h)) {
36661 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
36662 @@ -5059,7 +5059,7 @@ reinit_after_soft_reset:
36663 }
36664
36665 /* make sure the board interrupts are off */
36666 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
36667 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
36668 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
36669 if (rc)
36670 goto clean2;
36671 @@ -5109,7 +5109,7 @@ reinit_after_soft_reset:
36672 * fake ones to scoop up any residual completions.
36673 */
36674 spin_lock_irqsave(&h->lock, flags);
36675 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
36676 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
36677 spin_unlock_irqrestore(&h->lock, flags);
36678 free_irq(h->intr[h->intr_mode], h);
36679 rc = cciss_request_irq(h, cciss_msix_discard_completions,
36680 @@ -5129,9 +5129,9 @@ reinit_after_soft_reset:
36681 dev_info(&h->pdev->dev, "Board READY.\n");
36682 dev_info(&h->pdev->dev,
36683 "Waiting for stale completions to drain.\n");
36684 - h->access.set_intr_mask(h, CCISS_INTR_ON);
36685 + h->access->set_intr_mask(h, CCISS_INTR_ON);
36686 msleep(10000);
36687 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
36688 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
36689
36690 rc = controller_reset_failed(h->cfgtable);
36691 if (rc)
36692 @@ -5154,7 +5154,7 @@ reinit_after_soft_reset:
36693 cciss_scsi_setup(h);
36694
36695 /* Turn the interrupts on so we can service requests */
36696 - h->access.set_intr_mask(h, CCISS_INTR_ON);
36697 + h->access->set_intr_mask(h, CCISS_INTR_ON);
36698
36699 /* Get the firmware version */
36700 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
36701 @@ -5226,7 +5226,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
36702 kfree(flush_buf);
36703 if (return_code != IO_OK)
36704 dev_warn(&h->pdev->dev, "Error flushing cache\n");
36705 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
36706 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
36707 free_irq(h->intr[h->intr_mode], h);
36708 }
36709
36710 diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
36711 index 7fda30e..eb5dfe0 100644
36712 --- a/drivers/block/cciss.h
36713 +++ b/drivers/block/cciss.h
36714 @@ -101,7 +101,7 @@ struct ctlr_info
36715 /* information about each logical volume */
36716 drive_info_struct *drv[CISS_MAX_LUN];
36717
36718 - struct access_method access;
36719 + struct access_method *access;
36720
36721 /* queue and queue Info */
36722 struct list_head reqQ;
36723 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
36724 index 2b94403..fd6ad1f 100644
36725 --- a/drivers/block/cpqarray.c
36726 +++ b/drivers/block/cpqarray.c
36727 @@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
36728 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
36729 goto Enomem4;
36730 }
36731 - hba[i]->access.set_intr_mask(hba[i], 0);
36732 + hba[i]->access->set_intr_mask(hba[i], 0);
36733 if (request_irq(hba[i]->intr, do_ida_intr,
36734 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
36735 {
36736 @@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
36737 add_timer(&hba[i]->timer);
36738
36739 /* Enable IRQ now that spinlock and rate limit timer are set up */
36740 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
36741 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
36742
36743 for(j=0; j<NWD; j++) {
36744 struct gendisk *disk = ida_gendisk[i][j];
36745 @@ -694,7 +694,7 @@ DBGINFO(
36746 for(i=0; i<NR_PRODUCTS; i++) {
36747 if (board_id == products[i].board_id) {
36748 c->product_name = products[i].product_name;
36749 - c->access = *(products[i].access);
36750 + c->access = products[i].access;
36751 break;
36752 }
36753 }
36754 @@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
36755 hba[ctlr]->intr = intr;
36756 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
36757 hba[ctlr]->product_name = products[j].product_name;
36758 - hba[ctlr]->access = *(products[j].access);
36759 + hba[ctlr]->access = products[j].access;
36760 hba[ctlr]->ctlr = ctlr;
36761 hba[ctlr]->board_id = board_id;
36762 hba[ctlr]->pci_dev = NULL; /* not PCI */
36763 @@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
36764
36765 while((c = h->reqQ) != NULL) {
36766 /* Can't do anything if we're busy */
36767 - if (h->access.fifo_full(h) == 0)
36768 + if (h->access->fifo_full(h) == 0)
36769 return;
36770
36771 /* Get the first entry from the request Q */
36772 @@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
36773 h->Qdepth--;
36774
36775 /* Tell the controller to do our bidding */
36776 - h->access.submit_command(h, c);
36777 + h->access->submit_command(h, c);
36778
36779 /* Get onto the completion Q */
36780 addQ(&h->cmpQ, c);
36781 @@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
36782 unsigned long flags;
36783 __u32 a,a1;
36784
36785 - istat = h->access.intr_pending(h);
36786 + istat = h->access->intr_pending(h);
36787 /* Is this interrupt for us? */
36788 if (istat == 0)
36789 return IRQ_NONE;
36790 @@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
36791 */
36792 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
36793 if (istat & FIFO_NOT_EMPTY) {
36794 - while((a = h->access.command_completed(h))) {
36795 + while((a = h->access->command_completed(h))) {
36796 a1 = a; a &= ~3;
36797 if ((c = h->cmpQ) == NULL)
36798 {
36799 @@ -1448,11 +1448,11 @@ static int sendcmd(
36800 /*
36801 * Disable interrupt
36802 */
36803 - info_p->access.set_intr_mask(info_p, 0);
36804 + info_p->access->set_intr_mask(info_p, 0);
36805 /* Make sure there is room in the command FIFO */
36806 /* Actually it should be completely empty at this time. */
36807 for (i = 200000; i > 0; i--) {
36808 - temp = info_p->access.fifo_full(info_p);
36809 + temp = info_p->access->fifo_full(info_p);
36810 if (temp != 0) {
36811 break;
36812 }
36813 @@ -1465,7 +1465,7 @@ DBG(
36814 /*
36815 * Send the cmd
36816 */
36817 - info_p->access.submit_command(info_p, c);
36818 + info_p->access->submit_command(info_p, c);
36819 complete = pollcomplete(ctlr);
36820
36821 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
36822 @@ -1548,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
36823 * we check the new geometry. Then turn interrupts back on when
36824 * we're done.
36825 */
36826 - host->access.set_intr_mask(host, 0);
36827 + host->access->set_intr_mask(host, 0);
36828 getgeometry(ctlr);
36829 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
36830 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
36831
36832 for(i=0; i<NWD; i++) {
36833 struct gendisk *disk = ida_gendisk[ctlr][i];
36834 @@ -1590,7 +1590,7 @@ static int pollcomplete(int ctlr)
36835 /* Wait (up to 2 seconds) for a command to complete */
36836
36837 for (i = 200000; i > 0; i--) {
36838 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
36839 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
36840 if (done == 0) {
36841 udelay(10); /* a short fixed delay */
36842 } else
36843 diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
36844 index be73e9d..7fbf140 100644
36845 --- a/drivers/block/cpqarray.h
36846 +++ b/drivers/block/cpqarray.h
36847 @@ -99,7 +99,7 @@ struct ctlr_info {
36848 drv_info_t drv[NWD];
36849 struct proc_dir_entry *proc;
36850
36851 - struct access_method access;
36852 + struct access_method *access;
36853
36854 cmdlist_t *reqQ;
36855 cmdlist_t *cmpQ;
36856 diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
36857 index 2d7f608..11245fe 100644
36858 --- a/drivers/block/drbd/drbd_int.h
36859 +++ b/drivers/block/drbd/drbd_int.h
36860 @@ -582,7 +582,7 @@ struct drbd_epoch {
36861 struct drbd_tconn *tconn;
36862 struct list_head list;
36863 unsigned int barrier_nr;
36864 - atomic_t epoch_size; /* increased on every request added. */
36865 + atomic_unchecked_t epoch_size; /* increased on every request added. */
36866 atomic_t active; /* increased on every req. added, and dec on every finished. */
36867 unsigned long flags;
36868 };
36869 @@ -1022,7 +1022,7 @@ struct drbd_conf {
36870 unsigned int al_tr_number;
36871 int al_tr_cycle;
36872 wait_queue_head_t seq_wait;
36873 - atomic_t packet_seq;
36874 + atomic_unchecked_t packet_seq;
36875 unsigned int peer_seq;
36876 spinlock_t peer_seq_lock;
36877 unsigned int minor;
36878 @@ -1572,7 +1572,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
36879 char __user *uoptval;
36880 int err;
36881
36882 - uoptval = (char __user __force *)optval;
36883 + uoptval = (char __force_user *)optval;
36884
36885 set_fs(KERNEL_DS);
36886 if (level == SOL_SOCKET)
36887 diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
36888 index 55635ed..40e837c 100644
36889 --- a/drivers/block/drbd/drbd_main.c
36890 +++ b/drivers/block/drbd/drbd_main.c
36891 @@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
36892 p->sector = sector;
36893 p->block_id = block_id;
36894 p->blksize = blksize;
36895 - p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
36896 + p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
36897 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
36898 }
36899
36900 @@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
36901 return -EIO;
36902 p->sector = cpu_to_be64(req->i.sector);
36903 p->block_id = (unsigned long)req;
36904 - p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
36905 + p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
36906 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
36907 if (mdev->state.conn >= C_SYNC_SOURCE &&
36908 mdev->state.conn <= C_PAUSED_SYNC_T)
36909 @@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
36910 {
36911 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
36912
36913 - if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
36914 - conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
36915 + if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
36916 + conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
36917 kfree(tconn->current_epoch);
36918
36919 idr_destroy(&tconn->volumes);
36920 diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
36921 index 8cc1e64..ba7ffa9 100644
36922 --- a/drivers/block/drbd/drbd_nl.c
36923 +++ b/drivers/block/drbd/drbd_nl.c
36924 @@ -3440,7 +3440,7 @@ out:
36925
36926 void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
36927 {
36928 - static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
36929 + static atomic_unchecked_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
36930 struct sk_buff *msg;
36931 struct drbd_genlmsghdr *d_out;
36932 unsigned seq;
36933 @@ -3453,7 +3453,7 @@ void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
36934 return;
36935 }
36936
36937 - seq = atomic_inc_return(&drbd_genl_seq);
36938 + seq = atomic_inc_return_unchecked(&drbd_genl_seq);
36939 msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
36940 if (!msg)
36941 goto failed;
36942 diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
36943 index cc29cd3..d4b058b 100644
36944 --- a/drivers/block/drbd/drbd_receiver.c
36945 +++ b/drivers/block/drbd/drbd_receiver.c
36946 @@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mdev)
36947 {
36948 int err;
36949
36950 - atomic_set(&mdev->packet_seq, 0);
36951 + atomic_set_unchecked(&mdev->packet_seq, 0);
36952 mdev->peer_seq = 0;
36953
36954 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
36955 @@ -1193,7 +1193,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
36956 do {
36957 next_epoch = NULL;
36958
36959 - epoch_size = atomic_read(&epoch->epoch_size);
36960 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
36961
36962 switch (ev & ~EV_CLEANUP) {
36963 case EV_PUT:
36964 @@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
36965 rv = FE_DESTROYED;
36966 } else {
36967 epoch->flags = 0;
36968 - atomic_set(&epoch->epoch_size, 0);
36969 + atomic_set_unchecked(&epoch->epoch_size, 0);
36970 /* atomic_set(&epoch->active, 0); is already zero */
36971 if (rv == FE_STILL_LIVE)
36972 rv = FE_RECYCLED;
36973 @@ -1451,7 +1451,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
36974 conn_wait_active_ee_empty(tconn);
36975 drbd_flush(tconn);
36976
36977 - if (atomic_read(&tconn->current_epoch->epoch_size)) {
36978 + if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
36979 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
36980 if (epoch)
36981 break;
36982 @@ -1464,11 +1464,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
36983 }
36984
36985 epoch->flags = 0;
36986 - atomic_set(&epoch->epoch_size, 0);
36987 + atomic_set_unchecked(&epoch->epoch_size, 0);
36988 atomic_set(&epoch->active, 0);
36989
36990 spin_lock(&tconn->epoch_lock);
36991 - if (atomic_read(&tconn->current_epoch->epoch_size)) {
36992 + if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
36993 list_add(&epoch->list, &tconn->current_epoch->list);
36994 tconn->current_epoch = epoch;
36995 tconn->epochs++;
36996 @@ -2172,7 +2172,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
36997
36998 err = wait_for_and_update_peer_seq(mdev, peer_seq);
36999 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
37000 - atomic_inc(&tconn->current_epoch->epoch_size);
37001 + atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
37002 err2 = drbd_drain_block(mdev, pi->size);
37003 if (!err)
37004 err = err2;
37005 @@ -2206,7 +2206,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
37006
37007 spin_lock(&tconn->epoch_lock);
37008 peer_req->epoch = tconn->current_epoch;
37009 - atomic_inc(&peer_req->epoch->epoch_size);
37010 + atomic_inc_unchecked(&peer_req->epoch->epoch_size);
37011 atomic_inc(&peer_req->epoch->active);
37012 spin_unlock(&tconn->epoch_lock);
37013
37014 @@ -4347,7 +4347,7 @@ struct data_cmd {
37015 int expect_payload;
37016 size_t pkt_size;
37017 int (*fn)(struct drbd_tconn *, struct packet_info *);
37018 -};
37019 +} __do_const;
37020
37021 static struct data_cmd drbd_cmd_handler[] = {
37022 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
37023 @@ -4467,7 +4467,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
37024 if (!list_empty(&tconn->current_epoch->list))
37025 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
37026 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
37027 - atomic_set(&tconn->current_epoch->epoch_size, 0);
37028 + atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
37029 tconn->send.seen_any_write_yet = false;
37030
37031 conn_info(tconn, "Connection closed\n");
37032 @@ -5223,7 +5223,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
37033 struct asender_cmd {
37034 size_t pkt_size;
37035 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
37036 -};
37037 +} __do_const;
37038
37039 static struct asender_cmd asender_tbl[] = {
37040 [P_PING] = { 0, got_Ping },
37041 diff --git a/drivers/block/loop.c b/drivers/block/loop.c
37042 index c8dac73..1800093 100644
37043 --- a/drivers/block/loop.c
37044 +++ b/drivers/block/loop.c
37045 @@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
37046
37047 file_start_write(file);
37048 set_fs(get_ds());
37049 - bw = file->f_op->write(file, buf, len, &pos);
37050 + bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
37051 set_fs(old_fs);
37052 file_end_write(file);
37053 if (likely(bw == len))
37054 diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
37055 index 5618847..5a46f3b 100644
37056 --- a/drivers/block/pktcdvd.c
37057 +++ b/drivers/block/pktcdvd.c
37058 @@ -108,7 +108,7 @@ static int pkt_seq_show(struct seq_file *m, void *p);
37059
37060 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd)
37061 {
37062 - return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1);
37063 + return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1UL);
37064 }
37065
37066 /*
37067 @@ -1897,7 +1897,7 @@ static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd)
37068 return -EROFS;
37069 }
37070 pd->settings.fp = ti.fp;
37071 - pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1);
37072 + pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1UL);
37073
37074 if (ti.nwa_v) {
37075 pd->nwa = be32_to_cpu(ti.next_writable);
37076 diff --git a/drivers/bluetooth/btwilink.c b/drivers/bluetooth/btwilink.c
37077 index 60abf59..80789e1 100644
37078 --- a/drivers/bluetooth/btwilink.c
37079 +++ b/drivers/bluetooth/btwilink.c
37080 @@ -293,7 +293,7 @@ static int ti_st_send_frame(struct sk_buff *skb)
37081
37082 static int bt_ti_probe(struct platform_device *pdev)
37083 {
37084 - static struct ti_st *hst;
37085 + struct ti_st *hst;
37086 struct hci_dev *hdev;
37087 int err;
37088
37089 diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
37090 index 2009266..7be9ca2 100644
37091 --- a/drivers/bus/arm-cci.c
37092 +++ b/drivers/bus/arm-cci.c
37093 @@ -405,7 +405,7 @@ static int __init cci_probe(void)
37094
37095 nb_cci_ports = cci_config->nb_ace + cci_config->nb_ace_lite;
37096
37097 - ports = kcalloc(sizeof(*ports), nb_cci_ports, GFP_KERNEL);
37098 + ports = kcalloc(nb_cci_ports, sizeof(*ports), GFP_KERNEL);
37099 if (!ports)
37100 return -ENOMEM;
37101
37102 diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
37103 index 8a3aff7..d7538c2 100644
37104 --- a/drivers/cdrom/cdrom.c
37105 +++ b/drivers/cdrom/cdrom.c
37106 @@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
37107 ENSURE(reset, CDC_RESET);
37108 ENSURE(generic_packet, CDC_GENERIC_PACKET);
37109 cdi->mc_flags = 0;
37110 - cdo->n_minors = 0;
37111 cdi->options = CDO_USE_FFLAGS;
37112
37113 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
37114 @@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
37115 else
37116 cdi->cdda_method = CDDA_OLD;
37117
37118 - if (!cdo->generic_packet)
37119 - cdo->generic_packet = cdrom_dummy_generic_packet;
37120 + if (!cdo->generic_packet) {
37121 + pax_open_kernel();
37122 + *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
37123 + pax_close_kernel();
37124 + }
37125
37126 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
37127 mutex_lock(&cdrom_mutex);
37128 @@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
37129 if (cdi->exit)
37130 cdi->exit(cdi);
37131
37132 - cdi->ops->n_minors--;
37133 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
37134 }
37135
37136 @@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
37137 */
37138 nr = nframes;
37139 do {
37140 - cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
37141 + cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
37142 if (cgc.buffer)
37143 break;
37144
37145 @@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
37146 struct cdrom_device_info *cdi;
37147 int ret;
37148
37149 - ret = scnprintf(info + *pos, max_size - *pos, header);
37150 + ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
37151 if (!ret)
37152 return 1;
37153
37154 diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
37155 index 5980cb9..6d7bd7e 100644
37156 --- a/drivers/cdrom/gdrom.c
37157 +++ b/drivers/cdrom/gdrom.c
37158 @@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
37159 .audio_ioctl = gdrom_audio_ioctl,
37160 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
37161 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
37162 - .n_minors = 1,
37163 };
37164
37165 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
37166 diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
37167 index 1421997..33f5d6d 100644
37168 --- a/drivers/char/Kconfig
37169 +++ b/drivers/char/Kconfig
37170 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
37171
37172 config DEVKMEM
37173 bool "/dev/kmem virtual device support"
37174 - default y
37175 + default n
37176 + depends on !GRKERNSEC_KMEM
37177 help
37178 Say Y here if you want to support the /dev/kmem device. The
37179 /dev/kmem device is rarely used, but can be used for certain
37180 @@ -570,6 +571,7 @@ config DEVPORT
37181 bool
37182 depends on !M68K
37183 depends on ISA || PCI
37184 + depends on !GRKERNSEC_KMEM
37185 default y
37186
37187 source "drivers/s390/char/Kconfig"
37188 diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
37189 index a48e05b..6bac831 100644
37190 --- a/drivers/char/agp/compat_ioctl.c
37191 +++ b/drivers/char/agp/compat_ioctl.c
37192 @@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
37193 return -ENOMEM;
37194 }
37195
37196 - if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
37197 + if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
37198 sizeof(*usegment) * ureserve.seg_count)) {
37199 kfree(usegment);
37200 kfree(ksegment);
37201 diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
37202 index 1b19239..b87b143 100644
37203 --- a/drivers/char/agp/frontend.c
37204 +++ b/drivers/char/agp/frontend.c
37205 @@ -819,7 +819,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
37206 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
37207 return -EFAULT;
37208
37209 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
37210 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
37211 return -EFAULT;
37212
37213 client = agp_find_client_by_pid(reserve.pid);
37214 @@ -849,7 +849,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
37215 if (segment == NULL)
37216 return -ENOMEM;
37217
37218 - if (copy_from_user(segment, (void __user *) reserve.seg_list,
37219 + if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
37220 sizeof(struct agp_segment) * reserve.seg_count)) {
37221 kfree(segment);
37222 return -EFAULT;
37223 diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
37224 index 4f94375..413694e 100644
37225 --- a/drivers/char/genrtc.c
37226 +++ b/drivers/char/genrtc.c
37227 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
37228 switch (cmd) {
37229
37230 case RTC_PLL_GET:
37231 + memset(&pll, 0, sizeof(pll));
37232 if (get_rtc_pll(&pll))
37233 return -EINVAL;
37234 else
37235 diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
37236 index 448ce5e..3a76625 100644
37237 --- a/drivers/char/hpet.c
37238 +++ b/drivers/char/hpet.c
37239 @@ -559,7 +559,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
37240 }
37241
37242 static int
37243 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
37244 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
37245 struct hpet_info *info)
37246 {
37247 struct hpet_timer __iomem *timer;
37248 diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
37249 index 86fe45c..c0ea948 100644
37250 --- a/drivers/char/hw_random/intel-rng.c
37251 +++ b/drivers/char/hw_random/intel-rng.c
37252 @@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
37253
37254 if (no_fwh_detect)
37255 return -ENODEV;
37256 - printk(warning);
37257 + printk("%s", warning);
37258 return -EBUSY;
37259 }
37260
37261 diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
37262 index ec4e10f..f2a763b 100644
37263 --- a/drivers/char/ipmi/ipmi_msghandler.c
37264 +++ b/drivers/char/ipmi/ipmi_msghandler.c
37265 @@ -420,7 +420,7 @@ struct ipmi_smi {
37266 struct proc_dir_entry *proc_dir;
37267 char proc_dir_name[10];
37268
37269 - atomic_t stats[IPMI_NUM_STATS];
37270 + atomic_unchecked_t stats[IPMI_NUM_STATS];
37271
37272 /*
37273 * run_to_completion duplicate of smb_info, smi_info
37274 @@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
37275
37276
37277 #define ipmi_inc_stat(intf, stat) \
37278 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
37279 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
37280 #define ipmi_get_stat(intf, stat) \
37281 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
37282 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
37283
37284 static int is_lan_addr(struct ipmi_addr *addr)
37285 {
37286 @@ -2883,7 +2883,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
37287 INIT_LIST_HEAD(&intf->cmd_rcvrs);
37288 init_waitqueue_head(&intf->waitq);
37289 for (i = 0; i < IPMI_NUM_STATS; i++)
37290 - atomic_set(&intf->stats[i], 0);
37291 + atomic_set_unchecked(&intf->stats[i], 0);
37292
37293 intf->proc_dir = NULL;
37294
37295 diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
37296 index 15e4a60..b046093 100644
37297 --- a/drivers/char/ipmi/ipmi_si_intf.c
37298 +++ b/drivers/char/ipmi/ipmi_si_intf.c
37299 @@ -280,7 +280,7 @@ struct smi_info {
37300 unsigned char slave_addr;
37301
37302 /* Counters and things for the proc filesystem. */
37303 - atomic_t stats[SI_NUM_STATS];
37304 + atomic_unchecked_t stats[SI_NUM_STATS];
37305
37306 struct task_struct *thread;
37307
37308 @@ -289,9 +289,9 @@ struct smi_info {
37309 };
37310
37311 #define smi_inc_stat(smi, stat) \
37312 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
37313 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
37314 #define smi_get_stat(smi, stat) \
37315 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
37316 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
37317
37318 #define SI_MAX_PARMS 4
37319
37320 @@ -3324,7 +3324,7 @@ static int try_smi_init(struct smi_info *new_smi)
37321 atomic_set(&new_smi->req_events, 0);
37322 new_smi->run_to_completion = 0;
37323 for (i = 0; i < SI_NUM_STATS; i++)
37324 - atomic_set(&new_smi->stats[i], 0);
37325 + atomic_set_unchecked(&new_smi->stats[i], 0);
37326
37327 new_smi->interrupt_disabled = 1;
37328 atomic_set(&new_smi->stop_operation, 0);
37329 diff --git a/drivers/char/mem.c b/drivers/char/mem.c
37330 index f895a8c..2bc9147 100644
37331 --- a/drivers/char/mem.c
37332 +++ b/drivers/char/mem.c
37333 @@ -18,6 +18,7 @@
37334 #include <linux/raw.h>
37335 #include <linux/tty.h>
37336 #include <linux/capability.h>
37337 +#include <linux/security.h>
37338 #include <linux/ptrace.h>
37339 #include <linux/device.h>
37340 #include <linux/highmem.h>
37341 @@ -37,6 +38,10 @@
37342
37343 #define DEVPORT_MINOR 4
37344
37345 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
37346 +extern const struct file_operations grsec_fops;
37347 +#endif
37348 +
37349 static inline unsigned long size_inside_page(unsigned long start,
37350 unsigned long size)
37351 {
37352 @@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
37353
37354 while (cursor < to) {
37355 if (!devmem_is_allowed(pfn)) {
37356 +#ifdef CONFIG_GRKERNSEC_KMEM
37357 + gr_handle_mem_readwrite(from, to);
37358 +#else
37359 printk(KERN_INFO
37360 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
37361 current->comm, from, to);
37362 +#endif
37363 return 0;
37364 }
37365 cursor += PAGE_SIZE;
37366 @@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
37367 }
37368 return 1;
37369 }
37370 +#elif defined(CONFIG_GRKERNSEC_KMEM)
37371 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
37372 +{
37373 + return 0;
37374 +}
37375 #else
37376 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
37377 {
37378 @@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
37379
37380 while (count > 0) {
37381 unsigned long remaining;
37382 + char *temp;
37383
37384 sz = size_inside_page(p, count);
37385
37386 @@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
37387 if (!ptr)
37388 return -EFAULT;
37389
37390 - remaining = copy_to_user(buf, ptr, sz);
37391 +#ifdef CONFIG_PAX_USERCOPY
37392 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
37393 + if (!temp) {
37394 + unxlate_dev_mem_ptr(p, ptr);
37395 + return -ENOMEM;
37396 + }
37397 + memcpy(temp, ptr, sz);
37398 +#else
37399 + temp = ptr;
37400 +#endif
37401 +
37402 + remaining = copy_to_user(buf, temp, sz);
37403 +
37404 +#ifdef CONFIG_PAX_USERCOPY
37405 + kfree(temp);
37406 +#endif
37407 +
37408 unxlate_dev_mem_ptr(p, ptr);
37409 if (remaining)
37410 return -EFAULT;
37411 @@ -364,9 +395,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
37412 size_t count, loff_t *ppos)
37413 {
37414 unsigned long p = *ppos;
37415 - ssize_t low_count, read, sz;
37416 + ssize_t low_count, read, sz, err = 0;
37417 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
37418 - int err = 0;
37419
37420 read = 0;
37421 if (p < (unsigned long) high_memory) {
37422 @@ -388,6 +418,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
37423 }
37424 #endif
37425 while (low_count > 0) {
37426 + char *temp;
37427 +
37428 sz = size_inside_page(p, low_count);
37429
37430 /*
37431 @@ -397,7 +429,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
37432 */
37433 kbuf = xlate_dev_kmem_ptr((char *)p);
37434
37435 - if (copy_to_user(buf, kbuf, sz))
37436 +#ifdef CONFIG_PAX_USERCOPY
37437 + temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
37438 + if (!temp)
37439 + return -ENOMEM;
37440 + memcpy(temp, kbuf, sz);
37441 +#else
37442 + temp = kbuf;
37443 +#endif
37444 +
37445 + err = copy_to_user(buf, temp, sz);
37446 +
37447 +#ifdef CONFIG_PAX_USERCOPY
37448 + kfree(temp);
37449 +#endif
37450 +
37451 + if (err)
37452 return -EFAULT;
37453 buf += sz;
37454 p += sz;
37455 @@ -822,6 +869,9 @@ static const struct memdev {
37456 #ifdef CONFIG_PRINTK
37457 [11] = { "kmsg", 0644, &kmsg_fops, NULL },
37458 #endif
37459 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
37460 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
37461 +#endif
37462 };
37463
37464 static int memory_open(struct inode *inode, struct file *filp)
37465 @@ -893,7 +943,7 @@ static int __init chr_dev_init(void)
37466 continue;
37467
37468 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
37469 - NULL, devlist[minor].name);
37470 + NULL, "%s", devlist[minor].name);
37471 }
37472
37473 return tty_init();
37474 diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
37475 index 9df78e2..01ba9ae 100644
37476 --- a/drivers/char/nvram.c
37477 +++ b/drivers/char/nvram.c
37478 @@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
37479
37480 spin_unlock_irq(&rtc_lock);
37481
37482 - if (copy_to_user(buf, contents, tmp - contents))
37483 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
37484 return -EFAULT;
37485
37486 *ppos = i;
37487 diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
37488 index d39cca6..8c1e269 100644
37489 --- a/drivers/char/pcmcia/synclink_cs.c
37490 +++ b/drivers/char/pcmcia/synclink_cs.c
37491 @@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
37492
37493 if (debug_level >= DEBUG_LEVEL_INFO)
37494 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
37495 - __FILE__, __LINE__, info->device_name, port->count);
37496 + __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
37497
37498 - WARN_ON(!port->count);
37499 + WARN_ON(!atomic_read(&port->count));
37500
37501 if (tty_port_close_start(port, tty, filp) == 0)
37502 goto cleanup;
37503 @@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
37504 cleanup:
37505 if (debug_level >= DEBUG_LEVEL_INFO)
37506 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
37507 - tty->driver->name, port->count);
37508 + tty->driver->name, atomic_read(&port->count));
37509 }
37510
37511 /* Wait until the transmitter is empty.
37512 @@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
37513
37514 if (debug_level >= DEBUG_LEVEL_INFO)
37515 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
37516 - __FILE__, __LINE__, tty->driver->name, port->count);
37517 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
37518
37519 /* If port is closing, signal caller to try again */
37520 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
37521 @@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
37522 goto cleanup;
37523 }
37524 spin_lock(&port->lock);
37525 - port->count++;
37526 + atomic_inc(&port->count);
37527 spin_unlock(&port->lock);
37528 spin_unlock_irqrestore(&info->netlock, flags);
37529
37530 - if (port->count == 1) {
37531 + if (atomic_read(&port->count) == 1) {
37532 /* 1st open on this device, init hardware */
37533 retval = startup(info, tty);
37534 if (retval < 0)
37535 @@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
37536 unsigned short new_crctype;
37537
37538 /* return error if TTY interface open */
37539 - if (info->port.count)
37540 + if (atomic_read(&info->port.count))
37541 return -EBUSY;
37542
37543 switch (encoding)
37544 @@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
37545
37546 /* arbitrate between network and tty opens */
37547 spin_lock_irqsave(&info->netlock, flags);
37548 - if (info->port.count != 0 || info->netcount != 0) {
37549 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
37550 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
37551 spin_unlock_irqrestore(&info->netlock, flags);
37552 return -EBUSY;
37553 @@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
37554 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
37555
37556 /* return error if TTY interface open */
37557 - if (info->port.count)
37558 + if (atomic_read(&info->port.count))
37559 return -EBUSY;
37560
37561 if (cmd != SIOCWANDEV)
37562 diff --git a/drivers/char/random.c b/drivers/char/random.c
37563 index 7a744d3..35a177ee 100644
37564 --- a/drivers/char/random.c
37565 +++ b/drivers/char/random.c
37566 @@ -269,8 +269,13 @@
37567 /*
37568 * Configuration information
37569 */
37570 +#ifdef CONFIG_GRKERNSEC_RANDNET
37571 +#define INPUT_POOL_WORDS 512
37572 +#define OUTPUT_POOL_WORDS 128
37573 +#else
37574 #define INPUT_POOL_WORDS 128
37575 #define OUTPUT_POOL_WORDS 32
37576 +#endif
37577 #define SEC_XFER_SIZE 512
37578 #define EXTRACT_SIZE 10
37579
37580 @@ -310,10 +315,17 @@ static struct poolinfo {
37581 int poolwords;
37582 int tap1, tap2, tap3, tap4, tap5;
37583 } poolinfo_table[] = {
37584 +#ifdef CONFIG_GRKERNSEC_RANDNET
37585 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
37586 + { 512, 411, 308, 208, 104, 1 },
37587 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
37588 + { 128, 103, 76, 51, 25, 1 },
37589 +#else
37590 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
37591 { 128, 103, 76, 51, 25, 1 },
37592 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
37593 { 32, 26, 20, 14, 7, 1 },
37594 +#endif
37595 #if 0
37596 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
37597 { 2048, 1638, 1231, 819, 411, 1 },
37598 @@ -521,8 +533,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
37599 input_rotate += i ? 7 : 14;
37600 }
37601
37602 - ACCESS_ONCE(r->input_rotate) = input_rotate;
37603 - ACCESS_ONCE(r->add_ptr) = i;
37604 + ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
37605 + ACCESS_ONCE_RW(r->add_ptr) = i;
37606 smp_wmb();
37607
37608 if (out)
37609 @@ -1029,7 +1041,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
37610
37611 extract_buf(r, tmp);
37612 i = min_t(int, nbytes, EXTRACT_SIZE);
37613 - if (copy_to_user(buf, tmp, i)) {
37614 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
37615 ret = -EFAULT;
37616 break;
37617 }
37618 @@ -1365,7 +1377,7 @@ EXPORT_SYMBOL(generate_random_uuid);
37619 #include <linux/sysctl.h>
37620
37621 static int min_read_thresh = 8, min_write_thresh;
37622 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
37623 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
37624 static int max_write_thresh = INPUT_POOL_WORDS * 32;
37625 static char sysctl_bootid[16];
37626
37627 @@ -1381,7 +1393,7 @@ static char sysctl_bootid[16];
37628 static int proc_do_uuid(struct ctl_table *table, int write,
37629 void __user *buffer, size_t *lenp, loff_t *ppos)
37630 {
37631 - struct ctl_table fake_table;
37632 + ctl_table_no_const fake_table;
37633 unsigned char buf[64], tmp_uuid[16], *uuid;
37634
37635 uuid = table->data;
37636 diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
37637 index 7cc1fe22..b602d6b 100644
37638 --- a/drivers/char/sonypi.c
37639 +++ b/drivers/char/sonypi.c
37640 @@ -54,6 +54,7 @@
37641
37642 #include <asm/uaccess.h>
37643 #include <asm/io.h>
37644 +#include <asm/local.h>
37645
37646 #include <linux/sonypi.h>
37647
37648 @@ -490,7 +491,7 @@ static struct sonypi_device {
37649 spinlock_t fifo_lock;
37650 wait_queue_head_t fifo_proc_list;
37651 struct fasync_struct *fifo_async;
37652 - int open_count;
37653 + local_t open_count;
37654 int model;
37655 struct input_dev *input_jog_dev;
37656 struct input_dev *input_key_dev;
37657 @@ -892,7 +893,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
37658 static int sonypi_misc_release(struct inode *inode, struct file *file)
37659 {
37660 mutex_lock(&sonypi_device.lock);
37661 - sonypi_device.open_count--;
37662 + local_dec(&sonypi_device.open_count);
37663 mutex_unlock(&sonypi_device.lock);
37664 return 0;
37665 }
37666 @@ -901,9 +902,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
37667 {
37668 mutex_lock(&sonypi_device.lock);
37669 /* Flush input queue on first open */
37670 - if (!sonypi_device.open_count)
37671 + if (!local_read(&sonypi_device.open_count))
37672 kfifo_reset(&sonypi_device.fifo);
37673 - sonypi_device.open_count++;
37674 + local_inc(&sonypi_device.open_count);
37675 mutex_unlock(&sonypi_device.lock);
37676
37677 return 0;
37678 diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
37679 index 64420b3..5c40b56 100644
37680 --- a/drivers/char/tpm/tpm_acpi.c
37681 +++ b/drivers/char/tpm/tpm_acpi.c
37682 @@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
37683 virt = acpi_os_map_memory(start, len);
37684 if (!virt) {
37685 kfree(log->bios_event_log);
37686 + log->bios_event_log = NULL;
37687 printk("%s: ERROR - Unable to map memory\n", __func__);
37688 return -EIO;
37689 }
37690
37691 - memcpy_fromio(log->bios_event_log, virt, len);
37692 + memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
37693
37694 acpi_os_unmap_memory(virt, len);
37695 return 0;
37696 diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
37697 index 84ddc55..1d32f1e 100644
37698 --- a/drivers/char/tpm/tpm_eventlog.c
37699 +++ b/drivers/char/tpm/tpm_eventlog.c
37700 @@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
37701 event = addr;
37702
37703 if ((event->event_type == 0 && event->event_size == 0) ||
37704 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
37705 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
37706 return NULL;
37707
37708 return addr;
37709 @@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
37710 return NULL;
37711
37712 if ((event->event_type == 0 && event->event_size == 0) ||
37713 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
37714 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
37715 return NULL;
37716
37717 (*pos)++;
37718 @@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
37719 int i;
37720
37721 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
37722 - seq_putc(m, data[i]);
37723 + if (!seq_putc(m, data[i]))
37724 + return -EFAULT;
37725
37726 return 0;
37727 }
37728 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
37729 index b79cf3e..de172d64f 100644
37730 --- a/drivers/char/virtio_console.c
37731 +++ b/drivers/char/virtio_console.c
37732 @@ -682,7 +682,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
37733 if (to_user) {
37734 ssize_t ret;
37735
37736 - ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
37737 + ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
37738 if (ret)
37739 return -EFAULT;
37740 } else {
37741 @@ -785,7 +785,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
37742 if (!port_has_data(port) && !port->host_connected)
37743 return 0;
37744
37745 - return fill_readbuf(port, ubuf, count, true);
37746 + return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
37747 }
37748
37749 static int wait_port_writable(struct port *port, bool nonblock)
37750 diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
37751 index a33f46f..a720eed 100644
37752 --- a/drivers/clk/clk-composite.c
37753 +++ b/drivers/clk/clk-composite.c
37754 @@ -122,7 +122,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
37755 struct clk *clk;
37756 struct clk_init_data init;
37757 struct clk_composite *composite;
37758 - struct clk_ops *clk_composite_ops;
37759 + clk_ops_no_const *clk_composite_ops;
37760
37761 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
37762 if (!composite) {
37763 diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
37764 index 81dd31a..ef5c542 100644
37765 --- a/drivers/clk/socfpga/clk.c
37766 +++ b/drivers/clk/socfpga/clk.c
37767 @@ -22,6 +22,7 @@
37768 #include <linux/clk-provider.h>
37769 #include <linux/io.h>
37770 #include <linux/of.h>
37771 +#include <asm/pgtable.h>
37772
37773 /* Clock Manager offsets */
37774 #define CLKMGR_CTRL 0x0
37775 @@ -152,8 +153,10 @@ static __init struct clk *socfpga_clk_init(struct device_node *node,
37776 streq(clk_name, "periph_pll") ||
37777 streq(clk_name, "sdram_pll")) {
37778 socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
37779 - clk_pll_ops.enable = clk_gate_ops.enable;
37780 - clk_pll_ops.disable = clk_gate_ops.disable;
37781 + pax_open_kernel();
37782 + *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
37783 + *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
37784 + pax_close_kernel();
37785 }
37786
37787 clk = clk_register(NULL, &socfpga_clk->hw.hw);
37788 @@ -244,7 +247,7 @@ static unsigned long socfpga_clk_recalc_rate(struct clk_hw *hwclk,
37789 return parent_rate / div;
37790 }
37791
37792 -static struct clk_ops gateclk_ops = {
37793 +static clk_ops_no_const gateclk_ops __read_only = {
37794 .recalc_rate = socfpga_clk_recalc_rate,
37795 .get_parent = socfpga_clk_get_parent,
37796 .set_parent = socfpga_clk_set_parent,
37797 diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
37798 index 506fd23..01a593f 100644
37799 --- a/drivers/cpufreq/acpi-cpufreq.c
37800 +++ b/drivers/cpufreq/acpi-cpufreq.c
37801 @@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
37802 return sprintf(buf, "%u\n", boost_enabled);
37803 }
37804
37805 -static struct global_attr global_boost = __ATTR(boost, 0644,
37806 +static global_attr_no_const global_boost = __ATTR(boost, 0644,
37807 show_global_boost,
37808 store_global_boost);
37809
37810 @@ -721,8 +721,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
37811 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
37812 per_cpu(acfreq_data, cpu) = data;
37813
37814 - if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
37815 - acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
37816 + if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
37817 + pax_open_kernel();
37818 + *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
37819 + pax_close_kernel();
37820 + }
37821
37822 result = acpi_processor_register_performance(data->acpi_data, cpu);
37823 if (result)
37824 @@ -850,7 +853,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
37825 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
37826 break;
37827 case ACPI_ADR_SPACE_FIXED_HARDWARE:
37828 - acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
37829 + pax_open_kernel();
37830 + *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
37831 + pax_close_kernel();
37832 policy->cur = get_cur_freq_on_cpu(cpu);
37833 break;
37834 default:
37835 diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
37836 index 04548f7..457a342 100644
37837 --- a/drivers/cpufreq/cpufreq.c
37838 +++ b/drivers/cpufreq/cpufreq.c
37839 @@ -2069,7 +2069,7 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
37840 return NOTIFY_OK;
37841 }
37842
37843 -static struct notifier_block __refdata cpufreq_cpu_notifier = {
37844 +static struct notifier_block cpufreq_cpu_notifier = {
37845 .notifier_call = cpufreq_cpu_callback,
37846 };
37847
37848 @@ -2101,8 +2101,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
37849
37850 pr_debug("trying to register driver %s\n", driver_data->name);
37851
37852 - if (driver_data->setpolicy)
37853 - driver_data->flags |= CPUFREQ_CONST_LOOPS;
37854 + if (driver_data->setpolicy) {
37855 + pax_open_kernel();
37856 + *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
37857 + pax_close_kernel();
37858 + }
37859
37860 write_lock_irqsave(&cpufreq_driver_lock, flags);
37861 if (cpufreq_driver) {
37862 diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
37863 index 0806c31..6a73276 100644
37864 --- a/drivers/cpufreq/cpufreq_governor.c
37865 +++ b/drivers/cpufreq/cpufreq_governor.c
37866 @@ -187,7 +187,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
37867 struct dbs_data *dbs_data;
37868 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
37869 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
37870 - struct od_ops *od_ops = NULL;
37871 + const struct od_ops *od_ops = NULL;
37872 struct od_dbs_tuners *od_tuners = NULL;
37873 struct cs_dbs_tuners *cs_tuners = NULL;
37874 struct cpu_dbs_common_info *cpu_cdbs;
37875 @@ -253,7 +253,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
37876
37877 if ((cdata->governor == GOV_CONSERVATIVE) &&
37878 (!policy->governor->initialized)) {
37879 - struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37880 + const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37881
37882 cpufreq_register_notifier(cs_ops->notifier_block,
37883 CPUFREQ_TRANSITION_NOTIFIER);
37884 @@ -273,7 +273,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
37885
37886 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
37887 (policy->governor->initialized == 1)) {
37888 - struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37889 + const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
37890
37891 cpufreq_unregister_notifier(cs_ops->notifier_block,
37892 CPUFREQ_TRANSITION_NOTIFIER);
37893 diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
37894 index 88cd39f..87f0393 100644
37895 --- a/drivers/cpufreq/cpufreq_governor.h
37896 +++ b/drivers/cpufreq/cpufreq_governor.h
37897 @@ -202,7 +202,7 @@ struct common_dbs_data {
37898 void (*exit)(struct dbs_data *dbs_data);
37899
37900 /* Governor specific ops, see below */
37901 - void *gov_ops;
37902 + const void *gov_ops;
37903 };
37904
37905 /* Governor Per policy data */
37906 @@ -222,7 +222,7 @@ struct od_ops {
37907 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy,
37908 unsigned int freq_next, unsigned int relation);
37909 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq);
37910 -};
37911 +} __no_const;
37912
37913 struct cs_ops {
37914 struct notifier_block *notifier_block;
37915 diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
37916 index 32f26f6..feb657b 100644
37917 --- a/drivers/cpufreq/cpufreq_ondemand.c
37918 +++ b/drivers/cpufreq/cpufreq_ondemand.c
37919 @@ -522,7 +522,7 @@ static void od_exit(struct dbs_data *dbs_data)
37920
37921 define_get_cpu_dbs_routines(od_cpu_dbs_info);
37922
37923 -static struct od_ops od_ops = {
37924 +static struct od_ops od_ops __read_only = {
37925 .powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
37926 .powersave_bias_target = generic_powersave_bias_target,
37927 .freq_increase = dbs_freq_increase,
37928 @@ -577,14 +577,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
37929 (struct cpufreq_policy *, unsigned int, unsigned int),
37930 unsigned int powersave_bias)
37931 {
37932 - od_ops.powersave_bias_target = f;
37933 + pax_open_kernel();
37934 + *(void **)&od_ops.powersave_bias_target = f;
37935 + pax_close_kernel();
37936 od_set_powersave_bias(powersave_bias);
37937 }
37938 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
37939
37940 void od_unregister_powersave_bias_handler(void)
37941 {
37942 - od_ops.powersave_bias_target = generic_powersave_bias_target;
37943 + pax_open_kernel();
37944 + *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
37945 + pax_close_kernel();
37946 od_set_powersave_bias(0);
37947 }
37948 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
37949 diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
37950 index 4cf0d28..5830372 100644
37951 --- a/drivers/cpufreq/cpufreq_stats.c
37952 +++ b/drivers/cpufreq/cpufreq_stats.c
37953 @@ -352,7 +352,7 @@ static int cpufreq_stat_cpu_callback(struct notifier_block *nfb,
37954 }
37955
37956 /* priority=1 so this will get called before cpufreq_remove_dev */
37957 -static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
37958 +static struct notifier_block cpufreq_stat_cpu_notifier = {
37959 .notifier_call = cpufreq_stat_cpu_callback,
37960 .priority = 1,
37961 };
37962 diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
37963 index 2f0a2a6..93d728e 100644
37964 --- a/drivers/cpufreq/p4-clockmod.c
37965 +++ b/drivers/cpufreq/p4-clockmod.c
37966 @@ -160,10 +160,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
37967 case 0x0F: /* Core Duo */
37968 case 0x16: /* Celeron Core */
37969 case 0x1C: /* Atom */
37970 - p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37971 + pax_open_kernel();
37972 + *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37973 + pax_close_kernel();
37974 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
37975 case 0x0D: /* Pentium M (Dothan) */
37976 - p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37977 + pax_open_kernel();
37978 + *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37979 + pax_close_kernel();
37980 /* fall through */
37981 case 0x09: /* Pentium M (Banias) */
37982 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
37983 @@ -175,7 +179,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
37984
37985 /* on P-4s, the TSC runs with constant frequency independent whether
37986 * throttling is active or not. */
37987 - p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37988 + pax_open_kernel();
37989 + *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
37990 + pax_close_kernel();
37991
37992 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
37993 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
37994 diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
37995 index ac76b48..2445bc6 100644
37996 --- a/drivers/cpufreq/sparc-us3-cpufreq.c
37997 +++ b/drivers/cpufreq/sparc-us3-cpufreq.c
37998 @@ -18,14 +18,12 @@
37999 #include <asm/head.h>
38000 #include <asm/timer.h>
38001
38002 -static struct cpufreq_driver *cpufreq_us3_driver;
38003 -
38004 struct us3_freq_percpu_info {
38005 struct cpufreq_frequency_table table[4];
38006 };
38007
38008 /* Indexed by cpu number. */
38009 -static struct us3_freq_percpu_info *us3_freq_table;
38010 +static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
38011
38012 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
38013 * in the Safari config register.
38014 @@ -186,12 +184,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
38015
38016 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
38017 {
38018 - if (cpufreq_us3_driver)
38019 - us3_set_cpu_divider_index(policy, 0);
38020 + us3_set_cpu_divider_index(policy->cpu, 0);
38021
38022 return 0;
38023 }
38024
38025 +static int __init us3_freq_init(void);
38026 +static void __exit us3_freq_exit(void);
38027 +
38028 +static struct cpufreq_driver cpufreq_us3_driver = {
38029 + .init = us3_freq_cpu_init,
38030 + .verify = us3_freq_verify,
38031 + .target = us3_freq_target,
38032 + .get = us3_freq_get,
38033 + .exit = us3_freq_cpu_exit,
38034 + .owner = THIS_MODULE,
38035 + .name = "UltraSPARC-III",
38036 +
38037 +};
38038 +
38039 static int __init us3_freq_init(void)
38040 {
38041 unsigned long manuf, impl, ver;
38042 @@ -208,55 +219,15 @@ static int __init us3_freq_init(void)
38043 (impl == CHEETAH_IMPL ||
38044 impl == CHEETAH_PLUS_IMPL ||
38045 impl == JAGUAR_IMPL ||
38046 - impl == PANTHER_IMPL)) {
38047 - struct cpufreq_driver *driver;
38048 -
38049 - ret = -ENOMEM;
38050 - driver = kzalloc(sizeof(*driver), GFP_KERNEL);
38051 - if (!driver)
38052 - goto err_out;
38053 -
38054 - us3_freq_table = kzalloc((NR_CPUS * sizeof(*us3_freq_table)),
38055 - GFP_KERNEL);
38056 - if (!us3_freq_table)
38057 - goto err_out;
38058 -
38059 - driver->init = us3_freq_cpu_init;
38060 - driver->verify = us3_freq_verify;
38061 - driver->target = us3_freq_target;
38062 - driver->get = us3_freq_get;
38063 - driver->exit = us3_freq_cpu_exit;
38064 - strcpy(driver->name, "UltraSPARC-III");
38065 -
38066 - cpufreq_us3_driver = driver;
38067 - ret = cpufreq_register_driver(driver);
38068 - if (ret)
38069 - goto err_out;
38070 -
38071 - return 0;
38072 -
38073 -err_out:
38074 - if (driver) {
38075 - kfree(driver);
38076 - cpufreq_us3_driver = NULL;
38077 - }
38078 - kfree(us3_freq_table);
38079 - us3_freq_table = NULL;
38080 - return ret;
38081 - }
38082 + impl == PANTHER_IMPL))
38083 + return cpufreq_register_driver(&cpufreq_us3_driver);
38084
38085 return -ENODEV;
38086 }
38087
38088 static void __exit us3_freq_exit(void)
38089 {
38090 - if (cpufreq_us3_driver) {
38091 - cpufreq_unregister_driver(cpufreq_us3_driver);
38092 - kfree(cpufreq_us3_driver);
38093 - cpufreq_us3_driver = NULL;
38094 - kfree(us3_freq_table);
38095 - us3_freq_table = NULL;
38096 - }
38097 + cpufreq_unregister_driver(&cpufreq_us3_driver);
38098 }
38099
38100 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
38101 diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
38102 index f897d51..15da295 100644
38103 --- a/drivers/cpufreq/speedstep-centrino.c
38104 +++ b/drivers/cpufreq/speedstep-centrino.c
38105 @@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
38106 !cpu_has(cpu, X86_FEATURE_EST))
38107 return -ENODEV;
38108
38109 - if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
38110 - centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
38111 + if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
38112 + pax_open_kernel();
38113 + *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
38114 + pax_close_kernel();
38115 + }
38116
38117 if (policy->cpu != 0)
38118 return -ENODEV;
38119 diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
38120 index 22c07fb..9dff5ac 100644
38121 --- a/drivers/cpuidle/cpuidle.c
38122 +++ b/drivers/cpuidle/cpuidle.c
38123 @@ -252,7 +252,7 @@ static int poll_idle(struct cpuidle_device *dev,
38124
38125 static void poll_idle_init(struct cpuidle_driver *drv)
38126 {
38127 - struct cpuidle_state *state = &drv->states[0];
38128 + cpuidle_state_no_const *state = &drv->states[0];
38129
38130 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
38131 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
38132 diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
38133 index ea2f8e7..70ac501 100644
38134 --- a/drivers/cpuidle/governor.c
38135 +++ b/drivers/cpuidle/governor.c
38136 @@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
38137 mutex_lock(&cpuidle_lock);
38138 if (__cpuidle_find_governor(gov->name) == NULL) {
38139 ret = 0;
38140 - list_add_tail(&gov->governor_list, &cpuidle_governors);
38141 + pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
38142 if (!cpuidle_curr_governor ||
38143 cpuidle_curr_governor->rating < gov->rating)
38144 cpuidle_switch_governor(gov);
38145 @@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
38146 new_gov = cpuidle_replace_governor(gov->rating);
38147 cpuidle_switch_governor(new_gov);
38148 }
38149 - list_del(&gov->governor_list);
38150 + pax_list_del((struct list_head *)&gov->governor_list);
38151 mutex_unlock(&cpuidle_lock);
38152 }
38153
38154 diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
38155 index 8739cc0..dc859d0 100644
38156 --- a/drivers/cpuidle/sysfs.c
38157 +++ b/drivers/cpuidle/sysfs.c
38158 @@ -134,7 +134,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
38159 NULL
38160 };
38161
38162 -static struct attribute_group cpuidle_attr_group = {
38163 +static attribute_group_no_const cpuidle_attr_group = {
38164 .attrs = cpuidle_default_attrs,
38165 .name = "cpuidle",
38166 };
38167 diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
38168 index 12fea3e..1e28f47 100644
38169 --- a/drivers/crypto/hifn_795x.c
38170 +++ b/drivers/crypto/hifn_795x.c
38171 @@ -51,7 +51,7 @@ module_param_string(hifn_pll_ref, hifn_pll_ref, sizeof(hifn_pll_ref), 0444);
38172 MODULE_PARM_DESC(hifn_pll_ref,
38173 "PLL reference clock (pci[freq] or ext[freq], default ext)");
38174
38175 -static atomic_t hifn_dev_number;
38176 +static atomic_unchecked_t hifn_dev_number;
38177
38178 #define ACRYPTO_OP_DECRYPT 0
38179 #define ACRYPTO_OP_ENCRYPT 1
38180 @@ -2577,7 +2577,7 @@ static int hifn_probe(struct pci_dev *pdev, const struct pci_device_id *id)
38181 goto err_out_disable_pci_device;
38182
38183 snprintf(name, sizeof(name), "hifn%d",
38184 - atomic_inc_return(&hifn_dev_number)-1);
38185 + atomic_inc_return_unchecked(&hifn_dev_number)-1);
38186
38187 err = pci_request_regions(pdev, name);
38188 if (err)
38189 diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
38190 index c99c00d..990a4b2 100644
38191 --- a/drivers/devfreq/devfreq.c
38192 +++ b/drivers/devfreq/devfreq.c
38193 @@ -607,7 +607,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
38194 goto err_out;
38195 }
38196
38197 - list_add(&governor->node, &devfreq_governor_list);
38198 + pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
38199
38200 list_for_each_entry(devfreq, &devfreq_list, node) {
38201 int ret = 0;
38202 @@ -695,7 +695,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
38203 }
38204 }
38205
38206 - list_del(&governor->node);
38207 + pax_list_del((struct list_head *)&governor->node);
38208 err_out:
38209 mutex_unlock(&devfreq_list_lock);
38210
38211 diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
38212 index 1069e88..dfcd642 100644
38213 --- a/drivers/dma/sh/shdmac.c
38214 +++ b/drivers/dma/sh/shdmac.c
38215 @@ -511,7 +511,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
38216 return ret;
38217 }
38218
38219 -static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
38220 +static struct notifier_block sh_dmae_nmi_notifier = {
38221 .notifier_call = sh_dmae_nmi_handler,
38222
38223 /* Run before NMI debug handler and KGDB */
38224 diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
38225 index 211021d..201d47f 100644
38226 --- a/drivers/edac/edac_device.c
38227 +++ b/drivers/edac/edac_device.c
38228 @@ -474,9 +474,9 @@ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
38229 */
38230 int edac_device_alloc_index(void)
38231 {
38232 - static atomic_t device_indexes = ATOMIC_INIT(0);
38233 + static atomic_unchecked_t device_indexes = ATOMIC_INIT(0);
38234
38235 - return atomic_inc_return(&device_indexes) - 1;
38236 + return atomic_inc_return_unchecked(&device_indexes) - 1;
38237 }
38238 EXPORT_SYMBOL_GPL(edac_device_alloc_index);
38239
38240 diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
38241 index 9f7e0e60..348c875 100644
38242 --- a/drivers/edac/edac_mc_sysfs.c
38243 +++ b/drivers/edac/edac_mc_sysfs.c
38244 @@ -150,7 +150,7 @@ static const char * const edac_caps[] = {
38245 struct dev_ch_attribute {
38246 struct device_attribute attr;
38247 int channel;
38248 -};
38249 +} __do_const;
38250
38251 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
38252 struct dev_ch_attribute dev_attr_legacy_##_name = \
38253 @@ -1007,14 +1007,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
38254 }
38255
38256 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
38257 + pax_open_kernel();
38258 if (mci->get_sdram_scrub_rate) {
38259 - dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
38260 - dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
38261 + *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
38262 + *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
38263 }
38264 if (mci->set_sdram_scrub_rate) {
38265 - dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
38266 - dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
38267 + *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
38268 + *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
38269 }
38270 + pax_close_kernel();
38271 err = device_create_file(&mci->dev,
38272 &dev_attr_sdram_scrub_rate);
38273 if (err) {
38274 diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
38275 index dd370f9..0281629 100644
38276 --- a/drivers/edac/edac_pci.c
38277 +++ b/drivers/edac/edac_pci.c
38278 @@ -29,7 +29,7 @@
38279
38280 static DEFINE_MUTEX(edac_pci_ctls_mutex);
38281 static LIST_HEAD(edac_pci_list);
38282 -static atomic_t pci_indexes = ATOMIC_INIT(0);
38283 +static atomic_unchecked_t pci_indexes = ATOMIC_INIT(0);
38284
38285 /*
38286 * edac_pci_alloc_ctl_info
38287 @@ -315,7 +315,7 @@ EXPORT_SYMBOL_GPL(edac_pci_reset_delay_period);
38288 */
38289 int edac_pci_alloc_index(void)
38290 {
38291 - return atomic_inc_return(&pci_indexes) - 1;
38292 + return atomic_inc_return_unchecked(&pci_indexes) - 1;
38293 }
38294 EXPORT_SYMBOL_GPL(edac_pci_alloc_index);
38295
38296 diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
38297 index e8658e4..22746d6 100644
38298 --- a/drivers/edac/edac_pci_sysfs.c
38299 +++ b/drivers/edac/edac_pci_sysfs.c
38300 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
38301 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
38302 static int edac_pci_poll_msec = 1000; /* one second workq period */
38303
38304 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
38305 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
38306 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
38307 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
38308
38309 static struct kobject *edac_pci_top_main_kobj;
38310 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
38311 @@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
38312 void *value;
38313 ssize_t(*show) (void *, char *);
38314 ssize_t(*store) (void *, const char *, size_t);
38315 -};
38316 +} __do_const;
38317
38318 /* Set of show/store abstract level functions for PCI Parity object */
38319 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
38320 @@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38321 edac_printk(KERN_CRIT, EDAC_PCI,
38322 "Signaled System Error on %s\n",
38323 pci_name(dev));
38324 - atomic_inc(&pci_nonparity_count);
38325 + atomic_inc_unchecked(&pci_nonparity_count);
38326 }
38327
38328 if (status & (PCI_STATUS_PARITY)) {
38329 @@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38330 "Master Data Parity Error on %s\n",
38331 pci_name(dev));
38332
38333 - atomic_inc(&pci_parity_count);
38334 + atomic_inc_unchecked(&pci_parity_count);
38335 }
38336
38337 if (status & (PCI_STATUS_DETECTED_PARITY)) {
38338 @@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38339 "Detected Parity Error on %s\n",
38340 pci_name(dev));
38341
38342 - atomic_inc(&pci_parity_count);
38343 + atomic_inc_unchecked(&pci_parity_count);
38344 }
38345 }
38346
38347 @@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38348 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
38349 "Signaled System Error on %s\n",
38350 pci_name(dev));
38351 - atomic_inc(&pci_nonparity_count);
38352 + atomic_inc_unchecked(&pci_nonparity_count);
38353 }
38354
38355 if (status & (PCI_STATUS_PARITY)) {
38356 @@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38357 "Master Data Parity Error on "
38358 "%s\n", pci_name(dev));
38359
38360 - atomic_inc(&pci_parity_count);
38361 + atomic_inc_unchecked(&pci_parity_count);
38362 }
38363
38364 if (status & (PCI_STATUS_DETECTED_PARITY)) {
38365 @@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
38366 "Detected Parity Error on %s\n",
38367 pci_name(dev));
38368
38369 - atomic_inc(&pci_parity_count);
38370 + atomic_inc_unchecked(&pci_parity_count);
38371 }
38372 }
38373 }
38374 @@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
38375 if (!check_pci_errors)
38376 return;
38377
38378 - before_count = atomic_read(&pci_parity_count);
38379 + before_count = atomic_read_unchecked(&pci_parity_count);
38380
38381 /* scan all PCI devices looking for a Parity Error on devices and
38382 * bridges.
38383 @@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
38384 /* Only if operator has selected panic on PCI Error */
38385 if (edac_pci_get_panic_on_pe()) {
38386 /* If the count is different 'after' from 'before' */
38387 - if (before_count != atomic_read(&pci_parity_count))
38388 + if (before_count != atomic_read_unchecked(&pci_parity_count))
38389 panic("EDAC: PCI Parity Error");
38390 }
38391 }
38392 diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
38393 index 51b7e3a..aa8a3e8 100644
38394 --- a/drivers/edac/mce_amd.h
38395 +++ b/drivers/edac/mce_amd.h
38396 @@ -77,7 +77,7 @@ struct amd_decoder_ops {
38397 bool (*mc0_mce)(u16, u8);
38398 bool (*mc1_mce)(u16, u8);
38399 bool (*mc2_mce)(u16, u8);
38400 -};
38401 +} __no_const;
38402
38403 void amd_report_gart_errors(bool);
38404 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
38405 diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
38406 index 57ea7f4..af06b76 100644
38407 --- a/drivers/firewire/core-card.c
38408 +++ b/drivers/firewire/core-card.c
38409 @@ -528,9 +528,9 @@ void fw_card_initialize(struct fw_card *card,
38410 const struct fw_card_driver *driver,
38411 struct device *device)
38412 {
38413 - static atomic_t index = ATOMIC_INIT(-1);
38414 + static atomic_unchecked_t index = ATOMIC_INIT(-1);
38415
38416 - card->index = atomic_inc_return(&index);
38417 + card->index = atomic_inc_return_unchecked(&index);
38418 card->driver = driver;
38419 card->device = device;
38420 card->current_tlabel = 0;
38421 @@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
38422
38423 void fw_core_remove_card(struct fw_card *card)
38424 {
38425 - struct fw_card_driver dummy_driver = dummy_driver_template;
38426 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
38427
38428 card->driver->update_phy_reg(card, 4,
38429 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
38430 diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
38431 index de4aa40..49ab1f2 100644
38432 --- a/drivers/firewire/core-device.c
38433 +++ b/drivers/firewire/core-device.c
38434 @@ -253,7 +253,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
38435 struct config_rom_attribute {
38436 struct device_attribute attr;
38437 u32 key;
38438 -};
38439 +} __do_const;
38440
38441 static ssize_t show_immediate(struct device *dev,
38442 struct device_attribute *dattr, char *buf)
38443 diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
38444 index e5af0e3..d318058 100644
38445 --- a/drivers/firewire/core-transaction.c
38446 +++ b/drivers/firewire/core-transaction.c
38447 @@ -38,6 +38,7 @@
38448 #include <linux/timer.h>
38449 #include <linux/types.h>
38450 #include <linux/workqueue.h>
38451 +#include <linux/sched.h>
38452
38453 #include <asm/byteorder.h>
38454
38455 diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
38456 index 515a42c..5ecf3ba 100644
38457 --- a/drivers/firewire/core.h
38458 +++ b/drivers/firewire/core.h
38459 @@ -111,6 +111,7 @@ struct fw_card_driver {
38460
38461 int (*stop_iso)(struct fw_iso_context *ctx);
38462 };
38463 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
38464
38465 void fw_card_initialize(struct fw_card *card,
38466 const struct fw_card_driver *driver, struct device *device);
38467 diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
38468 index 94a58a0..f5eba42 100644
38469 --- a/drivers/firmware/dmi-id.c
38470 +++ b/drivers/firmware/dmi-id.c
38471 @@ -16,7 +16,7 @@
38472 struct dmi_device_attribute{
38473 struct device_attribute dev_attr;
38474 int field;
38475 -};
38476 +} __do_const;
38477 #define to_dmi_dev_attr(_dev_attr) \
38478 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
38479
38480 diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
38481 index fa0affb..aa448eb 100644
38482 --- a/drivers/firmware/dmi_scan.c
38483 +++ b/drivers/firmware/dmi_scan.c
38484 @@ -791,7 +791,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
38485 if (buf == NULL)
38486 return -1;
38487
38488 - dmi_table(buf, dmi_len, dmi_num, decode, private_data);
38489 + dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
38490
38491 iounmap(buf);
38492 return 0;
38493 diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
38494 index 5145fa3..0d3babd 100644
38495 --- a/drivers/firmware/efi/efi.c
38496 +++ b/drivers/firmware/efi/efi.c
38497 @@ -65,14 +65,16 @@ static struct attribute_group efi_subsys_attr_group = {
38498 };
38499
38500 static struct efivars generic_efivars;
38501 -static struct efivar_operations generic_ops;
38502 +static efivar_operations_no_const generic_ops __read_only;
38503
38504 static int generic_ops_register(void)
38505 {
38506 - generic_ops.get_variable = efi.get_variable;
38507 - generic_ops.set_variable = efi.set_variable;
38508 - generic_ops.get_next_variable = efi.get_next_variable;
38509 - generic_ops.query_variable_store = efi_query_variable_store;
38510 + pax_open_kernel();
38511 + *(void **)&generic_ops.get_variable = efi.get_variable;
38512 + *(void **)&generic_ops.set_variable = efi.set_variable;
38513 + *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
38514 + *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
38515 + pax_close_kernel();
38516
38517 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
38518 }
38519 diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
38520 index 8c5a61a..cf07bd0 100644
38521 --- a/drivers/firmware/efi/efivars.c
38522 +++ b/drivers/firmware/efi/efivars.c
38523 @@ -456,7 +456,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
38524 static int
38525 create_efivars_bin_attributes(void)
38526 {
38527 - struct bin_attribute *attr;
38528 + bin_attribute_no_const *attr;
38529 int error;
38530
38531 /* new_var */
38532 diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
38533 index 2a90ba6..07f3733 100644
38534 --- a/drivers/firmware/google/memconsole.c
38535 +++ b/drivers/firmware/google/memconsole.c
38536 @@ -147,7 +147,9 @@ static int __init memconsole_init(void)
38537 if (!found_memconsole())
38538 return -ENODEV;
38539
38540 - memconsole_bin_attr.size = memconsole_length;
38541 + pax_open_kernel();
38542 + *(size_t *)&memconsole_bin_attr.size = memconsole_length;
38543 + pax_close_kernel();
38544
38545 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
38546
38547 diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
38548 index 814addb..0937d7f 100644
38549 --- a/drivers/gpio/gpio-ich.c
38550 +++ b/drivers/gpio/gpio-ich.c
38551 @@ -71,7 +71,7 @@ struct ichx_desc {
38552 /* Some chipsets have quirks, let these use their own request/get */
38553 int (*request)(struct gpio_chip *chip, unsigned offset);
38554 int (*get)(struct gpio_chip *chip, unsigned offset);
38555 -};
38556 +} __do_const;
38557
38558 static struct {
38559 spinlock_t lock;
38560 diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
38561 index 9902732..64b62dd 100644
38562 --- a/drivers/gpio/gpio-vr41xx.c
38563 +++ b/drivers/gpio/gpio-vr41xx.c
38564 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
38565 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
38566 maskl, pendl, maskh, pendh);
38567
38568 - atomic_inc(&irq_err_count);
38569 + atomic_inc_unchecked(&irq_err_count);
38570
38571 return -EINVAL;
38572 }
38573 diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
38574 index c722c3b..2ec6040 100644
38575 --- a/drivers/gpu/drm/drm_crtc_helper.c
38576 +++ b/drivers/gpu/drm/drm_crtc_helper.c
38577 @@ -328,7 +328,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
38578 struct drm_crtc *tmp;
38579 int crtc_mask = 1;
38580
38581 - WARN(!crtc, "checking null crtc?\n");
38582 + BUG_ON(!crtc);
38583
38584 dev = crtc->dev;
38585
38586 diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
38587 index fe58d08..07bc38e 100644
38588 --- a/drivers/gpu/drm/drm_drv.c
38589 +++ b/drivers/gpu/drm/drm_drv.c
38590 @@ -186,7 +186,7 @@ static void drm_legacy_dev_reinit(struct drm_device *dev)
38591 atomic_set(&dev->vma_count, 0);
38592
38593 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
38594 - atomic_set(&dev->counts[i], 0);
38595 + atomic_set_unchecked(&dev->counts[i], 0);
38596
38597 dev->sigdata.lock = NULL;
38598
38599 @@ -302,7 +302,7 @@ module_exit(drm_core_exit);
38600 /**
38601 * Copy and IOCTL return string to user space
38602 */
38603 -static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
38604 +static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
38605 {
38606 int len;
38607
38608 @@ -372,7 +372,7 @@ long drm_ioctl(struct file *filp,
38609 struct drm_file *file_priv = filp->private_data;
38610 struct drm_device *dev;
38611 const struct drm_ioctl_desc *ioctl = NULL;
38612 - drm_ioctl_t *func;
38613 + drm_ioctl_no_const_t func;
38614 unsigned int nr = DRM_IOCTL_NR(cmd);
38615 int retcode = -EINVAL;
38616 char stack_kdata[128];
38617 @@ -385,7 +385,7 @@ long drm_ioctl(struct file *filp,
38618 return -ENODEV;
38619
38620 atomic_inc(&dev->ioctl_count);
38621 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
38622 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
38623 ++file_priv->ioctl_count;
38624
38625 if ((nr >= DRM_CORE_IOCTL_COUNT) &&
38626 diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
38627 index 3f84277..c627c54 100644
38628 --- a/drivers/gpu/drm/drm_fops.c
38629 +++ b/drivers/gpu/drm/drm_fops.c
38630 @@ -97,7 +97,7 @@ int drm_open(struct inode *inode, struct file *filp)
38631 if (drm_device_is_unplugged(dev))
38632 return -ENODEV;
38633
38634 - if (!dev->open_count++)
38635 + if (local_inc_return(&dev->open_count) == 1)
38636 need_setup = 1;
38637 mutex_lock(&dev->struct_mutex);
38638 old_imapping = inode->i_mapping;
38639 @@ -113,7 +113,7 @@ int drm_open(struct inode *inode, struct file *filp)
38640 retcode = drm_open_helper(inode, filp, dev);
38641 if (retcode)
38642 goto err_undo;
38643 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
38644 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
38645 if (need_setup) {
38646 retcode = drm_setup(dev);
38647 if (retcode)
38648 @@ -128,7 +128,7 @@ err_undo:
38649 iput(container_of(dev->dev_mapping, struct inode, i_data));
38650 dev->dev_mapping = old_mapping;
38651 mutex_unlock(&dev->struct_mutex);
38652 - dev->open_count--;
38653 + local_dec(&dev->open_count);
38654 return retcode;
38655 }
38656 EXPORT_SYMBOL(drm_open);
38657 @@ -405,7 +405,7 @@ int drm_release(struct inode *inode, struct file *filp)
38658
38659 mutex_lock(&drm_global_mutex);
38660
38661 - DRM_DEBUG("open_count = %d\n", dev->open_count);
38662 + DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
38663
38664 if (dev->driver->preclose)
38665 dev->driver->preclose(dev, file_priv);
38666 @@ -414,10 +414,10 @@ int drm_release(struct inode *inode, struct file *filp)
38667 * Begin inline drm_release
38668 */
38669
38670 - DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
38671 + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
38672 task_pid_nr(current),
38673 (long)old_encode_dev(file_priv->minor->device),
38674 - dev->open_count);
38675 + local_read(&dev->open_count));
38676
38677 /* Release any auth tokens that might point to this file_priv,
38678 (do that under the drm_global_mutex) */
38679 @@ -516,8 +516,8 @@ int drm_release(struct inode *inode, struct file *filp)
38680 * End inline drm_release
38681 */
38682
38683 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
38684 - if (!--dev->open_count) {
38685 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
38686 + if (local_dec_and_test(&dev->open_count)) {
38687 if (atomic_read(&dev->ioctl_count)) {
38688 DRM_ERROR("Device busy: %d\n",
38689 atomic_read(&dev->ioctl_count));
38690 diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
38691 index f731116..629842c 100644
38692 --- a/drivers/gpu/drm/drm_global.c
38693 +++ b/drivers/gpu/drm/drm_global.c
38694 @@ -36,7 +36,7 @@
38695 struct drm_global_item {
38696 struct mutex mutex;
38697 void *object;
38698 - int refcount;
38699 + atomic_t refcount;
38700 };
38701
38702 static struct drm_global_item glob[DRM_GLOBAL_NUM];
38703 @@ -49,7 +49,7 @@ void drm_global_init(void)
38704 struct drm_global_item *item = &glob[i];
38705 mutex_init(&item->mutex);
38706 item->object = NULL;
38707 - item->refcount = 0;
38708 + atomic_set(&item->refcount, 0);
38709 }
38710 }
38711
38712 @@ -59,7 +59,7 @@ void drm_global_release(void)
38713 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
38714 struct drm_global_item *item = &glob[i];
38715 BUG_ON(item->object != NULL);
38716 - BUG_ON(item->refcount != 0);
38717 + BUG_ON(atomic_read(&item->refcount) != 0);
38718 }
38719 }
38720
38721 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
38722 void *object;
38723
38724 mutex_lock(&item->mutex);
38725 - if (item->refcount == 0) {
38726 + if (atomic_read(&item->refcount) == 0) {
38727 item->object = kzalloc(ref->size, GFP_KERNEL);
38728 if (unlikely(item->object == NULL)) {
38729 ret = -ENOMEM;
38730 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
38731 goto out_err;
38732
38733 }
38734 - ++item->refcount;
38735 + atomic_inc(&item->refcount);
38736 ref->object = item->object;
38737 object = item->object;
38738 mutex_unlock(&item->mutex);
38739 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
38740 struct drm_global_item *item = &glob[ref->global_type];
38741
38742 mutex_lock(&item->mutex);
38743 - BUG_ON(item->refcount == 0);
38744 + BUG_ON(atomic_read(&item->refcount) == 0);
38745 BUG_ON(ref->object != item->object);
38746 - if (--item->refcount == 0) {
38747 + if (atomic_dec_and_test(&item->refcount)) {
38748 ref->release(ref);
38749 item->object = NULL;
38750 }
38751 diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
38752 index 5329832..b503f49 100644
38753 --- a/drivers/gpu/drm/drm_info.c
38754 +++ b/drivers/gpu/drm/drm_info.c
38755 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
38756 struct drm_local_map *map;
38757 struct drm_map_list *r_list;
38758
38759 - /* Hardcoded from _DRM_FRAME_BUFFER,
38760 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
38761 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
38762 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
38763 + static const char * const types[] = {
38764 + [_DRM_FRAME_BUFFER] = "FB",
38765 + [_DRM_REGISTERS] = "REG",
38766 + [_DRM_SHM] = "SHM",
38767 + [_DRM_AGP] = "AGP",
38768 + [_DRM_SCATTER_GATHER] = "SG",
38769 + [_DRM_CONSISTENT] = "PCI",
38770 + [_DRM_GEM] = "GEM" };
38771 const char *type;
38772 int i;
38773
38774 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
38775 map = r_list->map;
38776 if (!map)
38777 continue;
38778 - if (map->type < 0 || map->type > 5)
38779 + if (map->type >= ARRAY_SIZE(types))
38780 type = "??";
38781 else
38782 type = types[map->type];
38783 @@ -257,7 +261,11 @@ int drm_vma_info(struct seq_file *m, void *data)
38784 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
38785 vma->vm_flags & VM_LOCKED ? 'l' : '-',
38786 vma->vm_flags & VM_IO ? 'i' : '-',
38787 +#ifdef CONFIG_GRKERNSEC_HIDESYM
38788 + 0);
38789 +#else
38790 vma->vm_pgoff);
38791 +#endif
38792
38793 #if defined(__i386__)
38794 pgprot = pgprot_val(vma->vm_page_prot);
38795 diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
38796 index 2f4c434..dd12cd2 100644
38797 --- a/drivers/gpu/drm/drm_ioc32.c
38798 +++ b/drivers/gpu/drm/drm_ioc32.c
38799 @@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
38800 request = compat_alloc_user_space(nbytes);
38801 if (!access_ok(VERIFY_WRITE, request, nbytes))
38802 return -EFAULT;
38803 - list = (struct drm_buf_desc *) (request + 1);
38804 + list = (struct drm_buf_desc __user *) (request + 1);
38805
38806 if (__put_user(count, &request->count)
38807 || __put_user(list, &request->list))
38808 @@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
38809 request = compat_alloc_user_space(nbytes);
38810 if (!access_ok(VERIFY_WRITE, request, nbytes))
38811 return -EFAULT;
38812 - list = (struct drm_buf_pub *) (request + 1);
38813 + list = (struct drm_buf_pub __user *) (request + 1);
38814
38815 if (__put_user(count, &request->count)
38816 || __put_user(list, &request->list))
38817 @@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
38818 return 0;
38819 }
38820
38821 -drm_ioctl_compat_t *drm_compat_ioctls[] = {
38822 +drm_ioctl_compat_t drm_compat_ioctls[] = {
38823 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
38824 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
38825 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
38826 @@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
38827 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38828 {
38829 unsigned int nr = DRM_IOCTL_NR(cmd);
38830 - drm_ioctl_compat_t *fn;
38831 int ret;
38832
38833 /* Assume that ioctls without an explicit compat routine will just
38834 @@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38835 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
38836 return drm_ioctl(filp, cmd, arg);
38837
38838 - fn = drm_compat_ioctls[nr];
38839 -
38840 - if (fn != NULL)
38841 - ret = (*fn) (filp, cmd, arg);
38842 + if (drm_compat_ioctls[nr] != NULL)
38843 + ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
38844 else
38845 ret = drm_ioctl(filp, cmd, arg);
38846
38847 diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
38848 index d752c96..fe08455 100644
38849 --- a/drivers/gpu/drm/drm_lock.c
38850 +++ b/drivers/gpu/drm/drm_lock.c
38851 @@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
38852 if (drm_lock_take(&master->lock, lock->context)) {
38853 master->lock.file_priv = file_priv;
38854 master->lock.lock_time = jiffies;
38855 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
38856 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
38857 break; /* Got lock */
38858 }
38859
38860 @@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
38861 return -EINVAL;
38862 }
38863
38864 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
38865 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
38866
38867 if (drm_lock_free(&master->lock, lock->context)) {
38868 /* FIXME: Should really bail out here. */
38869 diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
38870 index 39d8645..59e06fa 100644
38871 --- a/drivers/gpu/drm/drm_stub.c
38872 +++ b/drivers/gpu/drm/drm_stub.c
38873 @@ -484,7 +484,7 @@ void drm_unplug_dev(struct drm_device *dev)
38874
38875 drm_device_set_unplugged(dev);
38876
38877 - if (dev->open_count == 0) {
38878 + if (local_read(&dev->open_count) == 0) {
38879 drm_put_dev(dev);
38880 }
38881 mutex_unlock(&drm_global_mutex);
38882 diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
38883 index 2290b3b..22056a1 100644
38884 --- a/drivers/gpu/drm/drm_sysfs.c
38885 +++ b/drivers/gpu/drm/drm_sysfs.c
38886 @@ -524,7 +524,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
38887 int drm_sysfs_device_add(struct drm_minor *minor)
38888 {
38889 int err;
38890 - char *minor_str;
38891 + const char *minor_str;
38892
38893 minor->kdev.parent = minor->dev->dev;
38894
38895 diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
38896 index ab1892eb..d7009ca 100644
38897 --- a/drivers/gpu/drm/i810/i810_dma.c
38898 +++ b/drivers/gpu/drm/i810/i810_dma.c
38899 @@ -944,8 +944,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
38900 dma->buflist[vertex->idx],
38901 vertex->discard, vertex->used);
38902
38903 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
38904 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
38905 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
38906 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
38907 sarea_priv->last_enqueue = dev_priv->counter - 1;
38908 sarea_priv->last_dispatch = (int)hw_status[5];
38909
38910 @@ -1105,8 +1105,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
38911 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
38912 mc->last_render);
38913
38914 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
38915 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
38916 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
38917 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
38918 sarea_priv->last_enqueue = dev_priv->counter - 1;
38919 sarea_priv->last_dispatch = (int)hw_status[5];
38920
38921 diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
38922 index d4d16ed..8fb0b51 100644
38923 --- a/drivers/gpu/drm/i810/i810_drv.h
38924 +++ b/drivers/gpu/drm/i810/i810_drv.h
38925 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
38926 int page_flipping;
38927
38928 wait_queue_head_t irq_queue;
38929 - atomic_t irq_received;
38930 - atomic_t irq_emitted;
38931 + atomic_unchecked_t irq_received;
38932 + atomic_unchecked_t irq_emitted;
38933
38934 int front_offset;
38935 } drm_i810_private_t;
38936 diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
38937 index a6f4cb5..6b2beb2 100644
38938 --- a/drivers/gpu/drm/i915/i915_debugfs.c
38939 +++ b/drivers/gpu/drm/i915/i915_debugfs.c
38940 @@ -624,7 +624,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
38941 I915_READ(GTIMR));
38942 }
38943 seq_printf(m, "Interrupts received: %d\n",
38944 - atomic_read(&dev_priv->irq_received));
38945 + atomic_read_unchecked(&dev_priv->irq_received));
38946 for_each_ring(ring, dev_priv, i) {
38947 if (IS_GEN6(dev) || IS_GEN7(dev)) {
38948 seq_printf(m,
38949 diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
38950 index d5c784d..06e5c36 100644
38951 --- a/drivers/gpu/drm/i915/i915_dma.c
38952 +++ b/drivers/gpu/drm/i915/i915_dma.c
38953 @@ -1263,7 +1263,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
38954 bool can_switch;
38955
38956 spin_lock(&dev->count_lock);
38957 - can_switch = (dev->open_count == 0);
38958 + can_switch = (local_read(&dev->open_count) == 0);
38959 spin_unlock(&dev->count_lock);
38960 return can_switch;
38961 }
38962 diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
38963 index ab0f2c0..53c1bda 100644
38964 --- a/drivers/gpu/drm/i915/i915_drv.h
38965 +++ b/drivers/gpu/drm/i915/i915_drv.h
38966 @@ -1181,7 +1181,7 @@ typedef struct drm_i915_private {
38967 drm_dma_handle_t *status_page_dmah;
38968 struct resource mch_res;
38969
38970 - atomic_t irq_received;
38971 + atomic_unchecked_t irq_received;
38972
38973 /* protects the irq masks */
38974 spinlock_t irq_lock;
38975 diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
38976 index bf34577..3fd2ffa 100644
38977 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
38978 +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
38979 @@ -768,9 +768,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
38980
38981 static int
38982 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
38983 - int count)
38984 + unsigned int count)
38985 {
38986 - int i;
38987 + unsigned int i;
38988 int relocs_total = 0;
38989 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
38990
38991 diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
38992 index 3c59584..500f2e9 100644
38993 --- a/drivers/gpu/drm/i915/i915_ioc32.c
38994 +++ b/drivers/gpu/drm/i915/i915_ioc32.c
38995 @@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
38996 (unsigned long)request);
38997 }
38998
38999 -static drm_ioctl_compat_t *i915_compat_ioctls[] = {
39000 +static drm_ioctl_compat_t i915_compat_ioctls[] = {
39001 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
39002 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
39003 [DRM_I915_GETPARAM] = compat_i915_getparam,
39004 @@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
39005 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39006 {
39007 unsigned int nr = DRM_IOCTL_NR(cmd);
39008 - drm_ioctl_compat_t *fn = NULL;
39009 int ret;
39010
39011 if (nr < DRM_COMMAND_BASE)
39012 return drm_compat_ioctl(filp, cmd, arg);
39013
39014 - if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
39015 - fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
39016 -
39017 - if (fn != NULL)
39018 + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
39019 + drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
39020 ret = (*fn) (filp, cmd, arg);
39021 - else
39022 + } else
39023 ret = drm_ioctl(filp, cmd, arg);
39024
39025 return ret;
39026 diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
39027 index 4b91228..590c643 100644
39028 --- a/drivers/gpu/drm/i915/i915_irq.c
39029 +++ b/drivers/gpu/drm/i915/i915_irq.c
39030 @@ -1085,7 +1085,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
39031 int pipe;
39032 u32 pipe_stats[I915_MAX_PIPES];
39033
39034 - atomic_inc(&dev_priv->irq_received);
39035 + atomic_inc_unchecked(&dev_priv->irq_received);
39036
39037 while (true) {
39038 iir = I915_READ(VLV_IIR);
39039 @@ -1390,7 +1390,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
39040 irqreturn_t ret = IRQ_NONE;
39041 bool err_int_reenable = false;
39042
39043 - atomic_inc(&dev_priv->irq_received);
39044 + atomic_inc_unchecked(&dev_priv->irq_received);
39045
39046 /* We get interrupts on unclaimed registers, so check for this before we
39047 * do any I915_{READ,WRITE}. */
39048 @@ -2146,7 +2146,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
39049 {
39050 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39051
39052 - atomic_set(&dev_priv->irq_received, 0);
39053 + atomic_set_unchecked(&dev_priv->irq_received, 0);
39054
39055 I915_WRITE(HWSTAM, 0xeffe);
39056
39057 @@ -2164,7 +2164,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
39058 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39059 int pipe;
39060
39061 - atomic_set(&dev_priv->irq_received, 0);
39062 + atomic_set_unchecked(&dev_priv->irq_received, 0);
39063
39064 /* VLV magic */
39065 I915_WRITE(VLV_IMR, 0);
39066 @@ -2452,7 +2452,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
39067 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39068 int pipe;
39069
39070 - atomic_set(&dev_priv->irq_received, 0);
39071 + atomic_set_unchecked(&dev_priv->irq_received, 0);
39072
39073 for_each_pipe(pipe)
39074 I915_WRITE(PIPESTAT(pipe), 0);
39075 @@ -2530,7 +2530,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
39076 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
39077 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
39078
39079 - atomic_inc(&dev_priv->irq_received);
39080 + atomic_inc_unchecked(&dev_priv->irq_received);
39081
39082 iir = I915_READ16(IIR);
39083 if (iir == 0)
39084 @@ -2604,7 +2604,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
39085 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39086 int pipe;
39087
39088 - atomic_set(&dev_priv->irq_received, 0);
39089 + atomic_set_unchecked(&dev_priv->irq_received, 0);
39090
39091 if (I915_HAS_HOTPLUG(dev)) {
39092 I915_WRITE(PORT_HOTPLUG_EN, 0);
39093 @@ -2703,7 +2703,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
39094 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
39095 int pipe, ret = IRQ_NONE;
39096
39097 - atomic_inc(&dev_priv->irq_received);
39098 + atomic_inc_unchecked(&dev_priv->irq_received);
39099
39100 iir = I915_READ(IIR);
39101 do {
39102 @@ -2827,7 +2827,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
39103 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
39104 int pipe;
39105
39106 - atomic_set(&dev_priv->irq_received, 0);
39107 + atomic_set_unchecked(&dev_priv->irq_received, 0);
39108
39109 I915_WRITE(PORT_HOTPLUG_EN, 0);
39110 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
39111 @@ -2941,7 +2941,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
39112 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
39113 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
39114
39115 - atomic_inc(&dev_priv->irq_received);
39116 + atomic_inc_unchecked(&dev_priv->irq_received);
39117
39118 iir = I915_READ(IIR);
39119
39120 diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
39121 index f535670..bde09e2 100644
39122 --- a/drivers/gpu/drm/i915/intel_display.c
39123 +++ b/drivers/gpu/drm/i915/intel_display.c
39124 @@ -10019,13 +10019,13 @@ struct intel_quirk {
39125 int subsystem_vendor;
39126 int subsystem_device;
39127 void (*hook)(struct drm_device *dev);
39128 -};
39129 +} __do_const;
39130
39131 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
39132 struct intel_dmi_quirk {
39133 void (*hook)(struct drm_device *dev);
39134 const struct dmi_system_id (*dmi_id_list)[];
39135 -};
39136 +} __do_const;
39137
39138 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
39139 {
39140 @@ -10033,18 +10033,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
39141 return 1;
39142 }
39143
39144 -static const struct intel_dmi_quirk intel_dmi_quirks[] = {
39145 +static const struct dmi_system_id intel_dmi_quirks_table[] = {
39146 {
39147 - .dmi_id_list = &(const struct dmi_system_id[]) {
39148 - {
39149 - .callback = intel_dmi_reverse_brightness,
39150 - .ident = "NCR Corporation",
39151 - .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
39152 - DMI_MATCH(DMI_PRODUCT_NAME, ""),
39153 - },
39154 - },
39155 - { } /* terminating entry */
39156 + .callback = intel_dmi_reverse_brightness,
39157 + .ident = "NCR Corporation",
39158 + .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
39159 + DMI_MATCH(DMI_PRODUCT_NAME, ""),
39160 },
39161 + },
39162 + { } /* terminating entry */
39163 +};
39164 +
39165 +static const struct intel_dmi_quirk intel_dmi_quirks[] = {
39166 + {
39167 + .dmi_id_list = &intel_dmi_quirks_table,
39168 .hook = quirk_invert_brightness,
39169 },
39170 };
39171 diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
39172 index ca4bc54..ee598a2 100644
39173 --- a/drivers/gpu/drm/mga/mga_drv.h
39174 +++ b/drivers/gpu/drm/mga/mga_drv.h
39175 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
39176 u32 clear_cmd;
39177 u32 maccess;
39178
39179 - atomic_t vbl_received; /**< Number of vblanks received. */
39180 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
39181 wait_queue_head_t fence_queue;
39182 - atomic_t last_fence_retired;
39183 + atomic_unchecked_t last_fence_retired;
39184 u32 next_fence_to_post;
39185
39186 unsigned int fb_cpp;
39187 diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
39188 index 709e90d..89a1c0d 100644
39189 --- a/drivers/gpu/drm/mga/mga_ioc32.c
39190 +++ b/drivers/gpu/drm/mga/mga_ioc32.c
39191 @@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
39192 return 0;
39193 }
39194
39195 -drm_ioctl_compat_t *mga_compat_ioctls[] = {
39196 +drm_ioctl_compat_t mga_compat_ioctls[] = {
39197 [DRM_MGA_INIT] = compat_mga_init,
39198 [DRM_MGA_GETPARAM] = compat_mga_getparam,
39199 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
39200 @@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
39201 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39202 {
39203 unsigned int nr = DRM_IOCTL_NR(cmd);
39204 - drm_ioctl_compat_t *fn = NULL;
39205 int ret;
39206
39207 if (nr < DRM_COMMAND_BASE)
39208 return drm_compat_ioctl(filp, cmd, arg);
39209
39210 - if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
39211 - fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
39212 -
39213 - if (fn != NULL)
39214 + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
39215 + drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
39216 ret = (*fn) (filp, cmd, arg);
39217 - else
39218 + } else
39219 ret = drm_ioctl(filp, cmd, arg);
39220
39221 return ret;
39222 diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
39223 index 598c281..60d590e 100644
39224 --- a/drivers/gpu/drm/mga/mga_irq.c
39225 +++ b/drivers/gpu/drm/mga/mga_irq.c
39226 @@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
39227 if (crtc != 0)
39228 return 0;
39229
39230 - return atomic_read(&dev_priv->vbl_received);
39231 + return atomic_read_unchecked(&dev_priv->vbl_received);
39232 }
39233
39234
39235 @@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
39236 /* VBLANK interrupt */
39237 if (status & MGA_VLINEPEN) {
39238 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
39239 - atomic_inc(&dev_priv->vbl_received);
39240 + atomic_inc_unchecked(&dev_priv->vbl_received);
39241 drm_handle_vblank(dev, 0);
39242 handled = 1;
39243 }
39244 @@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
39245 if ((prim_start & ~0x03) != (prim_end & ~0x03))
39246 MGA_WRITE(MGA_PRIMEND, prim_end);
39247
39248 - atomic_inc(&dev_priv->last_fence_retired);
39249 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
39250 DRM_WAKEUP(&dev_priv->fence_queue);
39251 handled = 1;
39252 }
39253 @@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
39254 * using fences.
39255 */
39256 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
39257 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
39258 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
39259 - *sequence) <= (1 << 23)));
39260
39261 *sequence = cur_fence;
39262 diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
39263 index 3e72876..d1c15ad 100644
39264 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c
39265 +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
39266 @@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
39267 struct bit_table {
39268 const char id;
39269 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
39270 -};
39271 +} __no_const;
39272
39273 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
39274
39275 diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
39276 index 994fd6e..6e12565 100644
39277 --- a/drivers/gpu/drm/nouveau/nouveau_drm.h
39278 +++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
39279 @@ -94,7 +94,6 @@ struct nouveau_drm {
39280 struct drm_global_reference mem_global_ref;
39281 struct ttm_bo_global_ref bo_global_ref;
39282 struct ttm_bo_device bdev;
39283 - atomic_t validate_sequence;
39284 int (*move)(struct nouveau_channel *,
39285 struct ttm_buffer_object *,
39286 struct ttm_mem_reg *, struct ttm_mem_reg *);
39287 diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
39288 index c1a7e5a..38b8539 100644
39289 --- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
39290 +++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
39291 @@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
39292 unsigned long arg)
39293 {
39294 unsigned int nr = DRM_IOCTL_NR(cmd);
39295 - drm_ioctl_compat_t *fn = NULL;
39296 + drm_ioctl_compat_t fn = NULL;
39297 int ret;
39298
39299 if (nr < DRM_COMMAND_BASE)
39300 diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
39301 index 81638d7..2e45854 100644
39302 --- a/drivers/gpu/drm/nouveau/nouveau_vga.c
39303 +++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
39304 @@ -65,7 +65,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
39305 bool can_switch;
39306
39307 spin_lock(&dev->count_lock);
39308 - can_switch = (dev->open_count == 0);
39309 + can_switch = (local_read(&dev->open_count) == 0);
39310 spin_unlock(&dev->count_lock);
39311 return can_switch;
39312 }
39313 diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
39314 index eb89653..613cf71 100644
39315 --- a/drivers/gpu/drm/qxl/qxl_cmd.c
39316 +++ b/drivers/gpu/drm/qxl/qxl_cmd.c
39317 @@ -285,27 +285,27 @@ static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port,
39318 int ret;
39319
39320 mutex_lock(&qdev->async_io_mutex);
39321 - irq_num = atomic_read(&qdev->irq_received_io_cmd);
39322 + irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
39323 if (qdev->last_sent_io_cmd > irq_num) {
39324 if (intr)
39325 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
39326 - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39327 + atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39328 else
39329 ret = wait_event_timeout(qdev->io_cmd_event,
39330 - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39331 + atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39332 /* 0 is timeout, just bail the "hw" has gone away */
39333 if (ret <= 0)
39334 goto out;
39335 - irq_num = atomic_read(&qdev->irq_received_io_cmd);
39336 + irq_num = atomic_read_unchecked(&qdev->irq_received_io_cmd);
39337 }
39338 outb(val, addr);
39339 qdev->last_sent_io_cmd = irq_num + 1;
39340 if (intr)
39341 ret = wait_event_interruptible_timeout(qdev->io_cmd_event,
39342 - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39343 + atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39344 else
39345 ret = wait_event_timeout(qdev->io_cmd_event,
39346 - atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39347 + atomic_read_unchecked(&qdev->irq_received_io_cmd) > irq_num, 5*HZ);
39348 out:
39349 if (ret > 0)
39350 ret = 0;
39351 diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c
39352 index c3c2bbd..bc3c0fb 100644
39353 --- a/drivers/gpu/drm/qxl/qxl_debugfs.c
39354 +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c
39355 @@ -42,10 +42,10 @@ qxl_debugfs_irq_received(struct seq_file *m, void *data)
39356 struct drm_info_node *node = (struct drm_info_node *) m->private;
39357 struct qxl_device *qdev = node->minor->dev->dev_private;
39358
39359 - seq_printf(m, "%d\n", atomic_read(&qdev->irq_received));
39360 - seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display));
39361 - seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor));
39362 - seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd));
39363 + seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received));
39364 + seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_display));
39365 + seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_cursor));
39366 + seq_printf(m, "%d\n", atomic_read_unchecked(&qdev->irq_received_io_cmd));
39367 seq_printf(m, "%d\n", qdev->irq_received_error);
39368 return 0;
39369 }
39370 diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
39371 index f7c9add..fb971d2 100644
39372 --- a/drivers/gpu/drm/qxl/qxl_drv.h
39373 +++ b/drivers/gpu/drm/qxl/qxl_drv.h
39374 @@ -290,10 +290,10 @@ struct qxl_device {
39375 unsigned int last_sent_io_cmd;
39376
39377 /* interrupt handling */
39378 - atomic_t irq_received;
39379 - atomic_t irq_received_display;
39380 - atomic_t irq_received_cursor;
39381 - atomic_t irq_received_io_cmd;
39382 + atomic_unchecked_t irq_received;
39383 + atomic_unchecked_t irq_received_display;
39384 + atomic_unchecked_t irq_received_cursor;
39385 + atomic_unchecked_t irq_received_io_cmd;
39386 unsigned irq_received_error;
39387 wait_queue_head_t display_event;
39388 wait_queue_head_t cursor_event;
39389 diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
39390 index 21393dc..329f3a9 100644
39391 --- a/drivers/gpu/drm/qxl/qxl_irq.c
39392 +++ b/drivers/gpu/drm/qxl/qxl_irq.c
39393 @@ -33,19 +33,19 @@ irqreturn_t qxl_irq_handler(DRM_IRQ_ARGS)
39394
39395 pending = xchg(&qdev->ram_header->int_pending, 0);
39396
39397 - atomic_inc(&qdev->irq_received);
39398 + atomic_inc_unchecked(&qdev->irq_received);
39399
39400 if (pending & QXL_INTERRUPT_DISPLAY) {
39401 - atomic_inc(&qdev->irq_received_display);
39402 + atomic_inc_unchecked(&qdev->irq_received_display);
39403 wake_up_all(&qdev->display_event);
39404 qxl_queue_garbage_collect(qdev, false);
39405 }
39406 if (pending & QXL_INTERRUPT_CURSOR) {
39407 - atomic_inc(&qdev->irq_received_cursor);
39408 + atomic_inc_unchecked(&qdev->irq_received_cursor);
39409 wake_up_all(&qdev->cursor_event);
39410 }
39411 if (pending & QXL_INTERRUPT_IO_CMD) {
39412 - atomic_inc(&qdev->irq_received_io_cmd);
39413 + atomic_inc_unchecked(&qdev->irq_received_io_cmd);
39414 wake_up_all(&qdev->io_cmd_event);
39415 }
39416 if (pending & QXL_INTERRUPT_ERROR) {
39417 @@ -82,10 +82,10 @@ int qxl_irq_init(struct qxl_device *qdev)
39418 init_waitqueue_head(&qdev->io_cmd_event);
39419 INIT_WORK(&qdev->client_monitors_config_work,
39420 qxl_client_monitors_config_work_func);
39421 - atomic_set(&qdev->irq_received, 0);
39422 - atomic_set(&qdev->irq_received_display, 0);
39423 - atomic_set(&qdev->irq_received_cursor, 0);
39424 - atomic_set(&qdev->irq_received_io_cmd, 0);
39425 + atomic_set_unchecked(&qdev->irq_received, 0);
39426 + atomic_set_unchecked(&qdev->irq_received_display, 0);
39427 + atomic_set_unchecked(&qdev->irq_received_cursor, 0);
39428 + atomic_set_unchecked(&qdev->irq_received_io_cmd, 0);
39429 qdev->irq_received_error = 0;
39430 ret = drm_irq_install(qdev->ddev);
39431 qdev->ram_header->int_mask = QXL_INTERRUPT_MASK;
39432 diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
39433 index 037786d..2a95e33 100644
39434 --- a/drivers/gpu/drm/qxl/qxl_ttm.c
39435 +++ b/drivers/gpu/drm/qxl/qxl_ttm.c
39436 @@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
39437 }
39438 }
39439
39440 -static struct vm_operations_struct qxl_ttm_vm_ops;
39441 +static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
39442 static const struct vm_operations_struct *ttm_vm_ops;
39443
39444 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
39445 @@ -147,8 +147,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
39446 return r;
39447 if (unlikely(ttm_vm_ops == NULL)) {
39448 ttm_vm_ops = vma->vm_ops;
39449 + pax_open_kernel();
39450 qxl_ttm_vm_ops = *ttm_vm_ops;
39451 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
39452 + pax_close_kernel();
39453 }
39454 vma->vm_ops = &qxl_ttm_vm_ops;
39455 return 0;
39456 @@ -558,25 +560,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
39457 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
39458 {
39459 #if defined(CONFIG_DEBUG_FS)
39460 - static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
39461 - static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
39462 - unsigned i;
39463 + static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
39464 + {
39465 + .name = "qxl_mem_mm",
39466 + .show = &qxl_mm_dump_table,
39467 + },
39468 + {
39469 + .name = "qxl_surf_mm",
39470 + .show = &qxl_mm_dump_table,
39471 + }
39472 + };
39473
39474 - for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
39475 - if (i == 0)
39476 - sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
39477 - else
39478 - sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
39479 - qxl_mem_types_list[i].name = qxl_mem_types_names[i];
39480 - qxl_mem_types_list[i].show = &qxl_mm_dump_table;
39481 - qxl_mem_types_list[i].driver_features = 0;
39482 - if (i == 0)
39483 - qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
39484 - else
39485 - qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
39486 + pax_open_kernel();
39487 + *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
39488 + *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
39489 + pax_close_kernel();
39490
39491 - }
39492 - return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
39493 + return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
39494 #else
39495 return 0;
39496 #endif
39497 diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
39498 index c451257..0ad2134 100644
39499 --- a/drivers/gpu/drm/r128/r128_cce.c
39500 +++ b/drivers/gpu/drm/r128/r128_cce.c
39501 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
39502
39503 /* GH: Simple idle check.
39504 */
39505 - atomic_set(&dev_priv->idle_count, 0);
39506 + atomic_set_unchecked(&dev_priv->idle_count, 0);
39507
39508 /* We don't support anything other than bus-mastering ring mode,
39509 * but the ring can be in either AGP or PCI space for the ring
39510 diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
39511 index 56eb5e3..c4ec43d 100644
39512 --- a/drivers/gpu/drm/r128/r128_drv.h
39513 +++ b/drivers/gpu/drm/r128/r128_drv.h
39514 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
39515 int is_pci;
39516 unsigned long cce_buffers_offset;
39517
39518 - atomic_t idle_count;
39519 + atomic_unchecked_t idle_count;
39520
39521 int page_flipping;
39522 int current_page;
39523 u32 crtc_offset;
39524 u32 crtc_offset_cntl;
39525
39526 - atomic_t vbl_received;
39527 + atomic_unchecked_t vbl_received;
39528
39529 u32 color_fmt;
39530 unsigned int front_offset;
39531 diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
39532 index a954c54..9cc595c 100644
39533 --- a/drivers/gpu/drm/r128/r128_ioc32.c
39534 +++ b/drivers/gpu/drm/r128/r128_ioc32.c
39535 @@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
39536 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
39537 }
39538
39539 -drm_ioctl_compat_t *r128_compat_ioctls[] = {
39540 +drm_ioctl_compat_t r128_compat_ioctls[] = {
39541 [DRM_R128_INIT] = compat_r128_init,
39542 [DRM_R128_DEPTH] = compat_r128_depth,
39543 [DRM_R128_STIPPLE] = compat_r128_stipple,
39544 @@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
39545 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39546 {
39547 unsigned int nr = DRM_IOCTL_NR(cmd);
39548 - drm_ioctl_compat_t *fn = NULL;
39549 int ret;
39550
39551 if (nr < DRM_COMMAND_BASE)
39552 return drm_compat_ioctl(filp, cmd, arg);
39553
39554 - if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
39555 - fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
39556 -
39557 - if (fn != NULL)
39558 + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
39559 + drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
39560 ret = (*fn) (filp, cmd, arg);
39561 - else
39562 + } else
39563 ret = drm_ioctl(filp, cmd, arg);
39564
39565 return ret;
39566 diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
39567 index 2ea4f09..d391371 100644
39568 --- a/drivers/gpu/drm/r128/r128_irq.c
39569 +++ b/drivers/gpu/drm/r128/r128_irq.c
39570 @@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
39571 if (crtc != 0)
39572 return 0;
39573
39574 - return atomic_read(&dev_priv->vbl_received);
39575 + return atomic_read_unchecked(&dev_priv->vbl_received);
39576 }
39577
39578 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
39579 @@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
39580 /* VBLANK interrupt */
39581 if (status & R128_CRTC_VBLANK_INT) {
39582 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
39583 - atomic_inc(&dev_priv->vbl_received);
39584 + atomic_inc_unchecked(&dev_priv->vbl_received);
39585 drm_handle_vblank(dev, 0);
39586 return IRQ_HANDLED;
39587 }
39588 diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
39589 index 01dd9ae..6352f04 100644
39590 --- a/drivers/gpu/drm/r128/r128_state.c
39591 +++ b/drivers/gpu/drm/r128/r128_state.c
39592 @@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
39593
39594 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
39595 {
39596 - if (atomic_read(&dev_priv->idle_count) == 0)
39597 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
39598 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
39599 else
39600 - atomic_set(&dev_priv->idle_count, 0);
39601 + atomic_set_unchecked(&dev_priv->idle_count, 0);
39602 }
39603
39604 #endif
39605 diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
39606 index af85299..ed9ac8d 100644
39607 --- a/drivers/gpu/drm/radeon/mkregtable.c
39608 +++ b/drivers/gpu/drm/radeon/mkregtable.c
39609 @@ -624,14 +624,14 @@ static int parser_auth(struct table *t, const char *filename)
39610 regex_t mask_rex;
39611 regmatch_t match[4];
39612 char buf[1024];
39613 - size_t end;
39614 + long end;
39615 int len;
39616 int done = 0;
39617 int r;
39618 unsigned o;
39619 struct offset *offset;
39620 char last_reg_s[10];
39621 - int last_reg;
39622 + unsigned long last_reg;
39623
39624 if (regcomp
39625 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
39626 diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
39627 index 841d0e0..9eaa268 100644
39628 --- a/drivers/gpu/drm/radeon/radeon_device.c
39629 +++ b/drivers/gpu/drm/radeon/radeon_device.c
39630 @@ -1117,7 +1117,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
39631 bool can_switch;
39632
39633 spin_lock(&dev->count_lock);
39634 - can_switch = (dev->open_count == 0);
39635 + can_switch = (local_read(&dev->open_count) == 0);
39636 spin_unlock(&dev->count_lock);
39637 return can_switch;
39638 }
39639 diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
39640 index b369d42..8dd04eb 100644
39641 --- a/drivers/gpu/drm/radeon/radeon_drv.h
39642 +++ b/drivers/gpu/drm/radeon/radeon_drv.h
39643 @@ -258,7 +258,7 @@ typedef struct drm_radeon_private {
39644
39645 /* SW interrupt */
39646 wait_queue_head_t swi_queue;
39647 - atomic_t swi_emitted;
39648 + atomic_unchecked_t swi_emitted;
39649 int vblank_crtc;
39650 uint32_t irq_enable_reg;
39651 uint32_t r500_disp_irq_reg;
39652 diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
39653 index c180df8..5fd8186 100644
39654 --- a/drivers/gpu/drm/radeon/radeon_ioc32.c
39655 +++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
39656 @@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
39657 request = compat_alloc_user_space(sizeof(*request));
39658 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
39659 || __put_user(req32.param, &request->param)
39660 - || __put_user((void __user *)(unsigned long)req32.value,
39661 + || __put_user((unsigned long)req32.value,
39662 &request->value))
39663 return -EFAULT;
39664
39665 @@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
39666 #define compat_radeon_cp_setparam NULL
39667 #endif /* X86_64 || IA64 */
39668
39669 -static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
39670 +static drm_ioctl_compat_t radeon_compat_ioctls[] = {
39671 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
39672 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
39673 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
39674 @@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
39675 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
39676 {
39677 unsigned int nr = DRM_IOCTL_NR(cmd);
39678 - drm_ioctl_compat_t *fn = NULL;
39679 int ret;
39680
39681 if (nr < DRM_COMMAND_BASE)
39682 return drm_compat_ioctl(filp, cmd, arg);
39683
39684 - if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
39685 - fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
39686 -
39687 - if (fn != NULL)
39688 + if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
39689 + drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
39690 ret = (*fn) (filp, cmd, arg);
39691 - else
39692 + } else
39693 ret = drm_ioctl(filp, cmd, arg);
39694
39695 return ret;
39696 diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
39697 index 8d68e97..9dcfed8 100644
39698 --- a/drivers/gpu/drm/radeon/radeon_irq.c
39699 +++ b/drivers/gpu/drm/radeon/radeon_irq.c
39700 @@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
39701 unsigned int ret;
39702 RING_LOCALS;
39703
39704 - atomic_inc(&dev_priv->swi_emitted);
39705 - ret = atomic_read(&dev_priv->swi_emitted);
39706 + atomic_inc_unchecked(&dev_priv->swi_emitted);
39707 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
39708
39709 BEGIN_RING(4);
39710 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
39711 @@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
39712 drm_radeon_private_t *dev_priv =
39713 (drm_radeon_private_t *) dev->dev_private;
39714
39715 - atomic_set(&dev_priv->swi_emitted, 0);
39716 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
39717 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
39718
39719 dev->max_vblank_count = 0x001fffff;
39720 diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
39721 index 4d20910..6726b6d 100644
39722 --- a/drivers/gpu/drm/radeon/radeon_state.c
39723 +++ b/drivers/gpu/drm/radeon/radeon_state.c
39724 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
39725 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
39726 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
39727
39728 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
39729 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
39730 sarea_priv->nbox * sizeof(depth_boxes[0])))
39731 return -EFAULT;
39732
39733 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
39734 {
39735 drm_radeon_private_t *dev_priv = dev->dev_private;
39736 drm_radeon_getparam_t *param = data;
39737 - int value;
39738 + int value = 0;
39739
39740 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
39741
39742 diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
39743 index 71245d6..94c556d 100644
39744 --- a/drivers/gpu/drm/radeon/radeon_ttm.c
39745 +++ b/drivers/gpu/drm/radeon/radeon_ttm.c
39746 @@ -784,7 +784,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
39747 man->size = size >> PAGE_SHIFT;
39748 }
39749
39750 -static struct vm_operations_struct radeon_ttm_vm_ops;
39751 +static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
39752 static const struct vm_operations_struct *ttm_vm_ops = NULL;
39753
39754 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
39755 @@ -825,8 +825,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
39756 }
39757 if (unlikely(ttm_vm_ops == NULL)) {
39758 ttm_vm_ops = vma->vm_ops;
39759 + pax_open_kernel();
39760 radeon_ttm_vm_ops = *ttm_vm_ops;
39761 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
39762 + pax_close_kernel();
39763 }
39764 vma->vm_ops = &radeon_ttm_vm_ops;
39765 return 0;
39766 @@ -855,38 +857,33 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
39767 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
39768 {
39769 #if defined(CONFIG_DEBUG_FS)
39770 - static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
39771 - static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
39772 - unsigned i;
39773 + static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2] = {
39774 + {
39775 + .name = "radeon_vram_mm",
39776 + .show = &radeon_mm_dump_table,
39777 + },
39778 + {
39779 + .name = "radeon_gtt_mm",
39780 + .show = &radeon_mm_dump_table,
39781 + },
39782 + {
39783 + .name = "ttm_page_pool",
39784 + .show = &ttm_page_alloc_debugfs,
39785 + },
39786 + {
39787 + .name = "ttm_dma_page_pool",
39788 + .show = &ttm_dma_page_alloc_debugfs,
39789 + },
39790 + };
39791 + unsigned i = RADEON_DEBUGFS_MEM_TYPES + 1;
39792
39793 - for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
39794 - if (i == 0)
39795 - sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
39796 - else
39797 - sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
39798 - radeon_mem_types_list[i].name = radeon_mem_types_names[i];
39799 - radeon_mem_types_list[i].show = &radeon_mm_dump_table;
39800 - radeon_mem_types_list[i].driver_features = 0;
39801 - if (i == 0)
39802 - radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
39803 - else
39804 - radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
39805 -
39806 - }
39807 - /* Add ttm page pool to debugfs */
39808 - sprintf(radeon_mem_types_names[i], "ttm_page_pool");
39809 - radeon_mem_types_list[i].name = radeon_mem_types_names[i];
39810 - radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
39811 - radeon_mem_types_list[i].driver_features = 0;
39812 - radeon_mem_types_list[i++].data = NULL;
39813 + pax_open_kernel();
39814 + *(void **)&radeon_mem_types_list[0].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
39815 + *(void **)&radeon_mem_types_list[1].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
39816 + pax_close_kernel();
39817 #ifdef CONFIG_SWIOTLB
39818 - if (swiotlb_nr_tbl()) {
39819 - sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
39820 - radeon_mem_types_list[i].name = radeon_mem_types_names[i];
39821 - radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
39822 - radeon_mem_types_list[i].driver_features = 0;
39823 - radeon_mem_types_list[i++].data = NULL;
39824 - }
39825 + if (swiotlb_nr_tbl())
39826 + i++;
39827 #endif
39828 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
39829
39830 diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
39831 index 1447d79..40b2a5b 100644
39832 --- a/drivers/gpu/drm/radeon/rs690.c
39833 +++ b/drivers/gpu/drm/radeon/rs690.c
39834 @@ -345,9 +345,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
39835 if (max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
39836 rdev->pm.sideport_bandwidth.full)
39837 max_bandwidth = rdev->pm.sideport_bandwidth;
39838 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
39839 + read_delay_latency.full = dfixed_const(800 * 1000);
39840 read_delay_latency.full = dfixed_div(read_delay_latency,
39841 rdev->pm.igp_sideport_mclk);
39842 + a.full = dfixed_const(370);
39843 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
39844 } else {
39845 if (max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
39846 rdev->pm.k8_bandwidth.full)
39847 diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
39848 index dbc2def..0a9f710 100644
39849 --- a/drivers/gpu/drm/ttm/ttm_memory.c
39850 +++ b/drivers/gpu/drm/ttm/ttm_memory.c
39851 @@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
39852 zone->glob = glob;
39853 glob->zone_kernel = zone;
39854 ret = kobject_init_and_add(
39855 - &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
39856 + &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
39857 if (unlikely(ret != 0)) {
39858 kobject_put(&zone->kobj);
39859 return ret;
39860 @@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
39861 zone->glob = glob;
39862 glob->zone_dma32 = zone;
39863 ret = kobject_init_and_add(
39864 - &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
39865 + &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
39866 if (unlikely(ret != 0)) {
39867 kobject_put(&zone->kobj);
39868 return ret;
39869 diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
39870 index 863bef9..cba15cf 100644
39871 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
39872 +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
39873 @@ -391,9 +391,9 @@ out:
39874 static unsigned long
39875 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
39876 {
39877 - static atomic_t start_pool = ATOMIC_INIT(0);
39878 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
39879 unsigned i;
39880 - unsigned pool_offset = atomic_add_return(1, &start_pool);
39881 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
39882 struct ttm_page_pool *pool;
39883 int shrink_pages = sc->nr_to_scan;
39884 unsigned long freed = 0;
39885 diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
39886 index 97e9d61..bf23c461 100644
39887 --- a/drivers/gpu/drm/udl/udl_fb.c
39888 +++ b/drivers/gpu/drm/udl/udl_fb.c
39889 @@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
39890 fb_deferred_io_cleanup(info);
39891 kfree(info->fbdefio);
39892 info->fbdefio = NULL;
39893 - info->fbops->fb_mmap = udl_fb_mmap;
39894 }
39895
39896 pr_warn("released /dev/fb%d user=%d count=%d\n",
39897 diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
39898 index a811ef2..ff99b05 100644
39899 --- a/drivers/gpu/drm/via/via_drv.h
39900 +++ b/drivers/gpu/drm/via/via_drv.h
39901 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
39902 typedef uint32_t maskarray_t[5];
39903
39904 typedef struct drm_via_irq {
39905 - atomic_t irq_received;
39906 + atomic_unchecked_t irq_received;
39907 uint32_t pending_mask;
39908 uint32_t enable_mask;
39909 wait_queue_head_t irq_queue;
39910 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
39911 struct timeval last_vblank;
39912 int last_vblank_valid;
39913 unsigned usec_per_vblank;
39914 - atomic_t vbl_received;
39915 + atomic_unchecked_t vbl_received;
39916 drm_via_state_t hc_state;
39917 char pci_buf[VIA_PCI_BUF_SIZE];
39918 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
39919 diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
39920 index ac98964..5dbf512 100644
39921 --- a/drivers/gpu/drm/via/via_irq.c
39922 +++ b/drivers/gpu/drm/via/via_irq.c
39923 @@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
39924 if (crtc != 0)
39925 return 0;
39926
39927 - return atomic_read(&dev_priv->vbl_received);
39928 + return atomic_read_unchecked(&dev_priv->vbl_received);
39929 }
39930
39931 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39932 @@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39933
39934 status = VIA_READ(VIA_REG_INTERRUPT);
39935 if (status & VIA_IRQ_VBLANK_PENDING) {
39936 - atomic_inc(&dev_priv->vbl_received);
39937 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
39938 + atomic_inc_unchecked(&dev_priv->vbl_received);
39939 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
39940 do_gettimeofday(&cur_vblank);
39941 if (dev_priv->last_vblank_valid) {
39942 dev_priv->usec_per_vblank =
39943 @@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39944 dev_priv->last_vblank = cur_vblank;
39945 dev_priv->last_vblank_valid = 1;
39946 }
39947 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
39948 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
39949 DRM_DEBUG("US per vblank is: %u\n",
39950 dev_priv->usec_per_vblank);
39951 }
39952 @@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
39953
39954 for (i = 0; i < dev_priv->num_irqs; ++i) {
39955 if (status & cur_irq->pending_mask) {
39956 - atomic_inc(&cur_irq->irq_received);
39957 + atomic_inc_unchecked(&cur_irq->irq_received);
39958 DRM_WAKEUP(&cur_irq->irq_queue);
39959 handled = 1;
39960 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
39961 @@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
39962 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
39963 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
39964 masks[irq][4]));
39965 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
39966 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
39967 } else {
39968 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
39969 (((cur_irq_sequence =
39970 - atomic_read(&cur_irq->irq_received)) -
39971 + atomic_read_unchecked(&cur_irq->irq_received)) -
39972 *sequence) <= (1 << 23)));
39973 }
39974 *sequence = cur_irq_sequence;
39975 @@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
39976 }
39977
39978 for (i = 0; i < dev_priv->num_irqs; ++i) {
39979 - atomic_set(&cur_irq->irq_received, 0);
39980 + atomic_set_unchecked(&cur_irq->irq_received, 0);
39981 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
39982 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
39983 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
39984 @@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
39985 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
39986 case VIA_IRQ_RELATIVE:
39987 irqwait->request.sequence +=
39988 - atomic_read(&cur_irq->irq_received);
39989 + atomic_read_unchecked(&cur_irq->irq_received);
39990 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
39991 case VIA_IRQ_ABSOLUTE:
39992 break;
39993 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
39994 index 150ec64..f5165f2 100644
39995 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
39996 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
39997 @@ -290,7 +290,7 @@ struct vmw_private {
39998 * Fencing and IRQs.
39999 */
40000
40001 - atomic_t marker_seq;
40002 + atomic_unchecked_t marker_seq;
40003 wait_queue_head_t fence_queue;
40004 wait_queue_head_t fifo_queue;
40005 int fence_queue_waiters; /* Protected by hw_mutex */
40006 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
40007 index 3eb1486..0a47ee9 100644
40008 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
40009 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
40010 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
40011 (unsigned int) min,
40012 (unsigned int) fifo->capabilities);
40013
40014 - atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
40015 + atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
40016 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
40017 vmw_marker_queue_init(&fifo->marker_queue);
40018 return vmw_fifo_send_fence(dev_priv, &dummy);
40019 @@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
40020 if (reserveable)
40021 iowrite32(bytes, fifo_mem +
40022 SVGA_FIFO_RESERVED);
40023 - return fifo_mem + (next_cmd >> 2);
40024 + return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
40025 } else {
40026 need_bounce = true;
40027 }
40028 @@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
40029
40030 fm = vmw_fifo_reserve(dev_priv, bytes);
40031 if (unlikely(fm == NULL)) {
40032 - *seqno = atomic_read(&dev_priv->marker_seq);
40033 + *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
40034 ret = -ENOMEM;
40035 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
40036 false, 3*HZ);
40037 @@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
40038 }
40039
40040 do {
40041 - *seqno = atomic_add_return(1, &dev_priv->marker_seq);
40042 + *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
40043 } while (*seqno == 0);
40044
40045 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
40046 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
40047 index c509d40..3b640c3 100644
40048 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
40049 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
40050 @@ -138,7 +138,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
40051 int ret;
40052
40053 num_clips = arg->num_clips;
40054 - clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
40055 + clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
40056
40057 if (unlikely(num_clips == 0))
40058 return 0;
40059 @@ -222,7 +222,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
40060 int ret;
40061
40062 num_clips = arg->num_clips;
40063 - clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
40064 + clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
40065
40066 if (unlikely(num_clips == 0))
40067 return 0;
40068 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
40069 index 4640adb..e1384ed 100644
40070 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
40071 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
40072 @@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
40073 * emitted. Then the fence is stale and signaled.
40074 */
40075
40076 - ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
40077 + ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
40078 > VMW_FENCE_WRAP);
40079
40080 return ret;
40081 @@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
40082
40083 if (fifo_idle)
40084 down_read(&fifo_state->rwsem);
40085 - signal_seq = atomic_read(&dev_priv->marker_seq);
40086 + signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
40087 ret = 0;
40088
40089 for (;;) {
40090 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
40091 index 8a8725c2..afed796 100644
40092 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
40093 +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
40094 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
40095 while (!vmw_lag_lt(queue, us)) {
40096 spin_lock(&queue->lock);
40097 if (list_empty(&queue->head))
40098 - seqno = atomic_read(&dev_priv->marker_seq);
40099 + seqno = atomic_read_unchecked(&dev_priv->marker_seq);
40100 else {
40101 marker = list_first_entry(&queue->head,
40102 struct vmw_marker, head);
40103 diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c
40104 index b1a05ad..1c9d899 100644
40105 --- a/drivers/gpu/host1x/drm/dc.c
40106 +++ b/drivers/gpu/host1x/drm/dc.c
40107 @@ -1004,7 +1004,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
40108 }
40109
40110 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
40111 - dc->debugfs_files[i].data = dc;
40112 + *(void **)&dc->debugfs_files[i].data = dc;
40113
40114 err = drm_debugfs_create_files(dc->debugfs_files,
40115 ARRAY_SIZE(debugfs_files),
40116 diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
40117 index ec0ae2d..dc0780b 100644
40118 --- a/drivers/gpu/vga/vga_switcheroo.c
40119 +++ b/drivers/gpu/vga/vga_switcheroo.c
40120 @@ -643,7 +643,7 @@ static int vga_switcheroo_runtime_resume(struct device *dev)
40121
40122 /* this version is for the case where the power switch is separate
40123 to the device being powered down. */
40124 -int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain)
40125 +int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain)
40126 {
40127 /* copy over all the bus versions */
40128 if (dev->bus && dev->bus->pm) {
40129 @@ -688,7 +688,7 @@ static int vga_switcheroo_runtime_resume_hdmi_audio(struct device *dev)
40130 return ret;
40131 }
40132
40133 -int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain)
40134 +int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain)
40135 {
40136 /* copy over all the bus versions */
40137 if (dev->bus && dev->bus->pm) {
40138 diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
40139 index aedfe50..1dc929b 100644
40140 --- a/drivers/hid/hid-core.c
40141 +++ b/drivers/hid/hid-core.c
40142 @@ -2416,7 +2416,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
40143
40144 int hid_add_device(struct hid_device *hdev)
40145 {
40146 - static atomic_t id = ATOMIC_INIT(0);
40147 + static atomic_unchecked_t id = ATOMIC_INIT(0);
40148 int ret;
40149
40150 if (WARN_ON(hdev->status & HID_STAT_ADDED))
40151 @@ -2450,7 +2450,7 @@ int hid_add_device(struct hid_device *hdev)
40152 /* XXX hack, any other cleaner solution after the driver core
40153 * is converted to allow more than 20 bytes as the device name? */
40154 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
40155 - hdev->vendor, hdev->product, atomic_inc_return(&id));
40156 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
40157
40158 hid_debug_register(hdev, dev_name(&hdev->dev));
40159 ret = device_add(&hdev->dev);
40160 diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
40161 index c13fb5b..55a3802 100644
40162 --- a/drivers/hid/hid-wiimote-debug.c
40163 +++ b/drivers/hid/hid-wiimote-debug.c
40164 @@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
40165 else if (size == 0)
40166 return -EIO;
40167
40168 - if (copy_to_user(u, buf, size))
40169 + if (size > sizeof(buf) || copy_to_user(u, buf, size))
40170 return -EFAULT;
40171
40172 *off += size;
40173 diff --git a/drivers/hid/uhid.c b/drivers/hid/uhid.c
40174 index cedc6da..2c3da2a 100644
40175 --- a/drivers/hid/uhid.c
40176 +++ b/drivers/hid/uhid.c
40177 @@ -47,7 +47,7 @@ struct uhid_device {
40178 struct mutex report_lock;
40179 wait_queue_head_t report_wait;
40180 atomic_t report_done;
40181 - atomic_t report_id;
40182 + atomic_unchecked_t report_id;
40183 struct uhid_event report_buf;
40184 };
40185
40186 @@ -163,7 +163,7 @@ static int uhid_hid_get_raw(struct hid_device *hid, unsigned char rnum,
40187
40188 spin_lock_irqsave(&uhid->qlock, flags);
40189 ev->type = UHID_FEATURE;
40190 - ev->u.feature.id = atomic_inc_return(&uhid->report_id);
40191 + ev->u.feature.id = atomic_inc_return_unchecked(&uhid->report_id);
40192 ev->u.feature.rnum = rnum;
40193 ev->u.feature.rtype = report_type;
40194
40195 @@ -446,7 +446,7 @@ static int uhid_dev_feature_answer(struct uhid_device *uhid,
40196 spin_lock_irqsave(&uhid->qlock, flags);
40197
40198 /* id for old report; drop it silently */
40199 - if (atomic_read(&uhid->report_id) != ev->u.feature_answer.id)
40200 + if (atomic_read_unchecked(&uhid->report_id) != ev->u.feature_answer.id)
40201 goto unlock;
40202 if (atomic_read(&uhid->report_done))
40203 goto unlock;
40204 diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
40205 index 6de6c98..18319e9 100644
40206 --- a/drivers/hv/channel.c
40207 +++ b/drivers/hv/channel.c
40208 @@ -406,8 +406,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
40209 int ret = 0;
40210 int t;
40211
40212 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
40213 - atomic_inc(&vmbus_connection.next_gpadl_handle);
40214 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
40215 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
40216
40217 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
40218 if (ret)
40219 diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
40220 index 88f4096..e50452e 100644
40221 --- a/drivers/hv/hv.c
40222 +++ b/drivers/hv/hv.c
40223 @@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
40224 u64 output_address = (output) ? virt_to_phys(output) : 0;
40225 u32 output_address_hi = output_address >> 32;
40226 u32 output_address_lo = output_address & 0xFFFFFFFF;
40227 - void *hypercall_page = hv_context.hypercall_page;
40228 + void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
40229
40230 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
40231 "=a"(hv_status_lo) : "d" (control_hi),
40232 diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
40233 index 7e17a54..a50a33d 100644
40234 --- a/drivers/hv/hv_balloon.c
40235 +++ b/drivers/hv/hv_balloon.c
40236 @@ -464,7 +464,7 @@ MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
40237
40238 module_param(pressure_report_delay, uint, (S_IRUGO | S_IWUSR));
40239 MODULE_PARM_DESC(pressure_report_delay, "Delay in secs in reporting pressure");
40240 -static atomic_t trans_id = ATOMIC_INIT(0);
40241 +static atomic_unchecked_t trans_id = ATOMIC_INIT(0);
40242
40243 static int dm_ring_size = (5 * PAGE_SIZE);
40244
40245 @@ -886,7 +886,7 @@ static void hot_add_req(struct work_struct *dummy)
40246 pr_info("Memory hot add failed\n");
40247
40248 dm->state = DM_INITIALIZED;
40249 - resp.hdr.trans_id = atomic_inc_return(&trans_id);
40250 + resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40251 vmbus_sendpacket(dm->dev->channel, &resp,
40252 sizeof(struct dm_hot_add_response),
40253 (unsigned long)NULL,
40254 @@ -960,7 +960,7 @@ static void post_status(struct hv_dynmem_device *dm)
40255 memset(&status, 0, sizeof(struct dm_status));
40256 status.hdr.type = DM_STATUS_REPORT;
40257 status.hdr.size = sizeof(struct dm_status);
40258 - status.hdr.trans_id = atomic_inc_return(&trans_id);
40259 + status.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40260
40261 /*
40262 * The host expects the guest to report free memory.
40263 @@ -980,7 +980,7 @@ static void post_status(struct hv_dynmem_device *dm)
40264 * send the status. This can happen if we were interrupted
40265 * after we picked our transaction ID.
40266 */
40267 - if (status.hdr.trans_id != atomic_read(&trans_id))
40268 + if (status.hdr.trans_id != atomic_read_unchecked(&trans_id))
40269 return;
40270
40271 vmbus_sendpacket(dm->dev->channel, &status,
40272 @@ -1108,7 +1108,7 @@ static void balloon_up(struct work_struct *dummy)
40273 */
40274
40275 do {
40276 - bl_resp->hdr.trans_id = atomic_inc_return(&trans_id);
40277 + bl_resp->hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40278 ret = vmbus_sendpacket(dm_device.dev->channel,
40279 bl_resp,
40280 bl_resp->hdr.size,
40281 @@ -1152,7 +1152,7 @@ static void balloon_down(struct hv_dynmem_device *dm,
40282
40283 memset(&resp, 0, sizeof(struct dm_unballoon_response));
40284 resp.hdr.type = DM_UNBALLOON_RESPONSE;
40285 - resp.hdr.trans_id = atomic_inc_return(&trans_id);
40286 + resp.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40287 resp.hdr.size = sizeof(struct dm_unballoon_response);
40288
40289 vmbus_sendpacket(dm_device.dev->channel, &resp,
40290 @@ -1215,7 +1215,7 @@ static void version_resp(struct hv_dynmem_device *dm,
40291 memset(&version_req, 0, sizeof(struct dm_version_request));
40292 version_req.hdr.type = DM_VERSION_REQUEST;
40293 version_req.hdr.size = sizeof(struct dm_version_request);
40294 - version_req.hdr.trans_id = atomic_inc_return(&trans_id);
40295 + version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40296 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN7;
40297 version_req.is_last_attempt = 1;
40298
40299 @@ -1385,7 +1385,7 @@ static int balloon_probe(struct hv_device *dev,
40300 memset(&version_req, 0, sizeof(struct dm_version_request));
40301 version_req.hdr.type = DM_VERSION_REQUEST;
40302 version_req.hdr.size = sizeof(struct dm_version_request);
40303 - version_req.hdr.trans_id = atomic_inc_return(&trans_id);
40304 + version_req.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40305 version_req.version.version = DYNMEM_PROTOCOL_VERSION_WIN8;
40306 version_req.is_last_attempt = 0;
40307
40308 @@ -1416,7 +1416,7 @@ static int balloon_probe(struct hv_device *dev,
40309 memset(&cap_msg, 0, sizeof(struct dm_capabilities));
40310 cap_msg.hdr.type = DM_CAPABILITIES_REPORT;
40311 cap_msg.hdr.size = sizeof(struct dm_capabilities);
40312 - cap_msg.hdr.trans_id = atomic_inc_return(&trans_id);
40313 + cap_msg.hdr.trans_id = atomic_inc_return_unchecked(&trans_id);
40314
40315 cap_msg.caps.cap_bits.balloon = 1;
40316 cap_msg.caps.cap_bits.hot_add = 1;
40317 diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
40318 index d84918f..7f38f9f 100644
40319 --- a/drivers/hv/hyperv_vmbus.h
40320 +++ b/drivers/hv/hyperv_vmbus.h
40321 @@ -595,7 +595,7 @@ enum vmbus_connect_state {
40322 struct vmbus_connection {
40323 enum vmbus_connect_state conn_state;
40324
40325 - atomic_t next_gpadl_handle;
40326 + atomic_unchecked_t next_gpadl_handle;
40327
40328 /*
40329 * Represents channel interrupts. Each bit position represents a
40330 diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
40331 index f9fe46f..356b119 100644
40332 --- a/drivers/hv/vmbus_drv.c
40333 +++ b/drivers/hv/vmbus_drv.c
40334 @@ -672,10 +672,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
40335 {
40336 int ret = 0;
40337
40338 - static atomic_t device_num = ATOMIC_INIT(0);
40339 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
40340
40341 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
40342 - atomic_inc_return(&device_num));
40343 + atomic_inc_return_unchecked(&device_num));
40344
40345 child_device_obj->device.bus = &hv_bus;
40346 child_device_obj->device.parent = &hv_acpi_dev->dev;
40347 diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
40348 index a9e3d01..9dd246e 100644
40349 --- a/drivers/hwmon/acpi_power_meter.c
40350 +++ b/drivers/hwmon/acpi_power_meter.c
40351 @@ -117,7 +117,7 @@ struct sensor_template {
40352 struct device_attribute *devattr,
40353 const char *buf, size_t count);
40354 int index;
40355 -};
40356 +} __do_const;
40357
40358 /* Averaging interval */
40359 static int update_avg_interval(struct acpi_power_meter_resource *resource)
40360 @@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
40361 struct sensor_template *attrs)
40362 {
40363 struct device *dev = &resource->acpi_dev->dev;
40364 - struct sensor_device_attribute *sensors =
40365 + sensor_device_attribute_no_const *sensors =
40366 &resource->sensors[resource->num_sensors];
40367 int res = 0;
40368
40369 diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
40370 index 3288f13..71cfb4e 100644
40371 --- a/drivers/hwmon/applesmc.c
40372 +++ b/drivers/hwmon/applesmc.c
40373 @@ -1106,7 +1106,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
40374 {
40375 struct applesmc_node_group *grp;
40376 struct applesmc_dev_attr *node;
40377 - struct attribute *attr;
40378 + attribute_no_const *attr;
40379 int ret, i;
40380
40381 for (grp = groups; grp->format; grp++) {
40382 diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
40383 index b25c643..a13460d 100644
40384 --- a/drivers/hwmon/asus_atk0110.c
40385 +++ b/drivers/hwmon/asus_atk0110.c
40386 @@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
40387 struct atk_sensor_data {
40388 struct list_head list;
40389 struct atk_data *data;
40390 - struct device_attribute label_attr;
40391 - struct device_attribute input_attr;
40392 - struct device_attribute limit1_attr;
40393 - struct device_attribute limit2_attr;
40394 + device_attribute_no_const label_attr;
40395 + device_attribute_no_const input_attr;
40396 + device_attribute_no_const limit1_attr;
40397 + device_attribute_no_const limit2_attr;
40398 char label_attr_name[ATTR_NAME_SIZE];
40399 char input_attr_name[ATTR_NAME_SIZE];
40400 char limit1_attr_name[ATTR_NAME_SIZE];
40401 @@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
40402 static struct device_attribute atk_name_attr =
40403 __ATTR(name, 0444, atk_name_show, NULL);
40404
40405 -static void atk_init_attribute(struct device_attribute *attr, char *name,
40406 +static void atk_init_attribute(device_attribute_no_const *attr, char *name,
40407 sysfs_show_func show)
40408 {
40409 sysfs_attr_init(&attr->attr);
40410 diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
40411 index 78be661..4dd032f 100644
40412 --- a/drivers/hwmon/coretemp.c
40413 +++ b/drivers/hwmon/coretemp.c
40414 @@ -797,7 +797,7 @@ static int coretemp_cpu_callback(struct notifier_block *nfb,
40415 return NOTIFY_OK;
40416 }
40417
40418 -static struct notifier_block coretemp_cpu_notifier __refdata = {
40419 +static struct notifier_block coretemp_cpu_notifier = {
40420 .notifier_call = coretemp_cpu_callback,
40421 };
40422
40423 diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
40424 index 632f1dc..57e6a58 100644
40425 --- a/drivers/hwmon/ibmaem.c
40426 +++ b/drivers/hwmon/ibmaem.c
40427 @@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
40428 struct aem_rw_sensor_template *rw)
40429 {
40430 struct device *dev = &data->pdev->dev;
40431 - struct sensor_device_attribute *sensors = data->sensors;
40432 + sensor_device_attribute_no_const *sensors = data->sensors;
40433 int err;
40434
40435 /* Set up read-only sensors */
40436 diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
40437 index 708081b..fe2d4ab 100644
40438 --- a/drivers/hwmon/iio_hwmon.c
40439 +++ b/drivers/hwmon/iio_hwmon.c
40440 @@ -73,7 +73,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
40441 {
40442 struct device *dev = &pdev->dev;
40443 struct iio_hwmon_state *st;
40444 - struct sensor_device_attribute *a;
40445 + sensor_device_attribute_no_const *a;
40446 int ret, i;
40447 int in_i = 1, temp_i = 1, curr_i = 1;
40448 enum iio_chan_type type;
40449 diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
40450 index 6eb03ce..bea7e3e 100644
40451 --- a/drivers/hwmon/nct6775.c
40452 +++ b/drivers/hwmon/nct6775.c
40453 @@ -936,10 +936,10 @@ static struct attribute_group *
40454 nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
40455 int repeat)
40456 {
40457 - struct attribute_group *group;
40458 + attribute_group_no_const *group;
40459 struct sensor_device_attr_u *su;
40460 - struct sensor_device_attribute *a;
40461 - struct sensor_device_attribute_2 *a2;
40462 + sensor_device_attribute_no_const *a;
40463 + sensor_device_attribute_2_no_const *a2;
40464 struct attribute **attrs;
40465 struct sensor_device_template **t;
40466 int err, i, j, count;
40467 diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
40468 index 9319fcf..189ff45 100644
40469 --- a/drivers/hwmon/pmbus/pmbus_core.c
40470 +++ b/drivers/hwmon/pmbus/pmbus_core.c
40471 @@ -781,7 +781,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
40472 return 0;
40473 }
40474
40475 -static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
40476 +static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
40477 const char *name,
40478 umode_t mode,
40479 ssize_t (*show)(struct device *dev,
40480 @@ -798,7 +798,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
40481 dev_attr->store = store;
40482 }
40483
40484 -static void pmbus_attr_init(struct sensor_device_attribute *a,
40485 +static void pmbus_attr_init(sensor_device_attribute_no_const *a,
40486 const char *name,
40487 umode_t mode,
40488 ssize_t (*show)(struct device *dev,
40489 @@ -820,7 +820,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
40490 u16 reg, u8 mask)
40491 {
40492 struct pmbus_boolean *boolean;
40493 - struct sensor_device_attribute *a;
40494 + sensor_device_attribute_no_const *a;
40495
40496 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
40497 if (!boolean)
40498 @@ -845,7 +845,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
40499 bool update, bool readonly)
40500 {
40501 struct pmbus_sensor *sensor;
40502 - struct device_attribute *a;
40503 + device_attribute_no_const *a;
40504
40505 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
40506 if (!sensor)
40507 @@ -876,7 +876,7 @@ static int pmbus_add_label(struct pmbus_data *data,
40508 const char *lstring, int index)
40509 {
40510 struct pmbus_label *label;
40511 - struct device_attribute *a;
40512 + device_attribute_no_const *a;
40513
40514 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
40515 if (!label)
40516 diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
40517 index 97cd45a..ac54d8b 100644
40518 --- a/drivers/hwmon/sht15.c
40519 +++ b/drivers/hwmon/sht15.c
40520 @@ -169,7 +169,7 @@ struct sht15_data {
40521 int supply_uv;
40522 bool supply_uv_valid;
40523 struct work_struct update_supply_work;
40524 - atomic_t interrupt_handled;
40525 + atomic_unchecked_t interrupt_handled;
40526 };
40527
40528 /**
40529 @@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
40530 ret = gpio_direction_input(data->pdata->gpio_data);
40531 if (ret)
40532 return ret;
40533 - atomic_set(&data->interrupt_handled, 0);
40534 + atomic_set_unchecked(&data->interrupt_handled, 0);
40535
40536 enable_irq(gpio_to_irq(data->pdata->gpio_data));
40537 if (gpio_get_value(data->pdata->gpio_data) == 0) {
40538 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
40539 /* Only relevant if the interrupt hasn't occurred. */
40540 - if (!atomic_read(&data->interrupt_handled))
40541 + if (!atomic_read_unchecked(&data->interrupt_handled))
40542 schedule_work(&data->read_work);
40543 }
40544 ret = wait_event_timeout(data->wait_queue,
40545 @@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
40546
40547 /* First disable the interrupt */
40548 disable_irq_nosync(irq);
40549 - atomic_inc(&data->interrupt_handled);
40550 + atomic_inc_unchecked(&data->interrupt_handled);
40551 /* Then schedule a reading work struct */
40552 if (data->state != SHT15_READING_NOTHING)
40553 schedule_work(&data->read_work);
40554 @@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
40555 * If not, then start the interrupt again - care here as could
40556 * have gone low in meantime so verify it hasn't!
40557 */
40558 - atomic_set(&data->interrupt_handled, 0);
40559 + atomic_set_unchecked(&data->interrupt_handled, 0);
40560 enable_irq(gpio_to_irq(data->pdata->gpio_data));
40561 /* If still not occurred or another handler was scheduled */
40562 if (gpio_get_value(data->pdata->gpio_data)
40563 - || atomic_read(&data->interrupt_handled))
40564 + || atomic_read_unchecked(&data->interrupt_handled))
40565 return;
40566 }
40567
40568 diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
40569 index 38944e9..ae9e5ed 100644
40570 --- a/drivers/hwmon/via-cputemp.c
40571 +++ b/drivers/hwmon/via-cputemp.c
40572 @@ -296,7 +296,7 @@ static int via_cputemp_cpu_callback(struct notifier_block *nfb,
40573 return NOTIFY_OK;
40574 }
40575
40576 -static struct notifier_block via_cputemp_cpu_notifier __refdata = {
40577 +static struct notifier_block via_cputemp_cpu_notifier = {
40578 .notifier_call = via_cputemp_cpu_callback,
40579 };
40580
40581 diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
40582 index 07f01ac..d79ad3d 100644
40583 --- a/drivers/i2c/busses/i2c-amd756-s4882.c
40584 +++ b/drivers/i2c/busses/i2c-amd756-s4882.c
40585 @@ -43,7 +43,7 @@
40586 extern struct i2c_adapter amd756_smbus;
40587
40588 static struct i2c_adapter *s4882_adapter;
40589 -static struct i2c_algorithm *s4882_algo;
40590 +static i2c_algorithm_no_const *s4882_algo;
40591
40592 /* Wrapper access functions for multiplexed SMBus */
40593 static DEFINE_MUTEX(amd756_lock);
40594 diff --git a/drivers/i2c/busses/i2c-diolan-u2c.c b/drivers/i2c/busses/i2c-diolan-u2c.c
40595 index dae3ddf..26e21d1 100644
40596 --- a/drivers/i2c/busses/i2c-diolan-u2c.c
40597 +++ b/drivers/i2c/busses/i2c-diolan-u2c.c
40598 @@ -99,7 +99,7 @@ MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
40599 /* usb layer */
40600
40601 /* Send command to device, and get response. */
40602 -static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
40603 +static int __intentional_overflow(-1) diolan_usb_transfer(struct i2c_diolan_u2c *dev)
40604 {
40605 int ret = 0;
40606 int actual;
40607 diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
40608 index 2ca268d..c6acbdf 100644
40609 --- a/drivers/i2c/busses/i2c-nforce2-s4985.c
40610 +++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
40611 @@ -41,7 +41,7 @@
40612 extern struct i2c_adapter *nforce2_smbus;
40613
40614 static struct i2c_adapter *s4985_adapter;
40615 -static struct i2c_algorithm *s4985_algo;
40616 +static i2c_algorithm_no_const *s4985_algo;
40617
40618 /* Wrapper access functions for multiplexed SMBus */
40619 static DEFINE_MUTEX(nforce2_lock);
40620 diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
40621 index c3ccdea..5b3dc1a 100644
40622 --- a/drivers/i2c/i2c-dev.c
40623 +++ b/drivers/i2c/i2c-dev.c
40624 @@ -271,7 +271,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
40625 break;
40626 }
40627
40628 - data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
40629 + data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
40630 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
40631 if (IS_ERR(rdwr_pa[i].buf)) {
40632 res = PTR_ERR(rdwr_pa[i].buf);
40633 diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
40634 index 0b510ba..4fbb5085 100644
40635 --- a/drivers/ide/ide-cd.c
40636 +++ b/drivers/ide/ide-cd.c
40637 @@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
40638 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
40639 if ((unsigned long)buf & alignment
40640 || blk_rq_bytes(rq) & q->dma_pad_mask
40641 - || object_is_on_stack(buf))
40642 + || object_starts_on_stack(buf))
40643 drive->dma = 0;
40644 }
40645 }
40646 diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
40647 index f95c697..0a1b05c 100644
40648 --- a/drivers/iio/industrialio-core.c
40649 +++ b/drivers/iio/industrialio-core.c
40650 @@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
40651 }
40652
40653 static
40654 -int __iio_device_attr_init(struct device_attribute *dev_attr,
40655 +int __iio_device_attr_init(device_attribute_no_const *dev_attr,
40656 const char *postfix,
40657 struct iio_chan_spec const *chan,
40658 ssize_t (*readfunc)(struct device *dev,
40659 diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
40660 index 784b97c..c9ceadf 100644
40661 --- a/drivers/infiniband/core/cm.c
40662 +++ b/drivers/infiniband/core/cm.c
40663 @@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
40664
40665 struct cm_counter_group {
40666 struct kobject obj;
40667 - atomic_long_t counter[CM_ATTR_COUNT];
40668 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
40669 };
40670
40671 struct cm_counter_attribute {
40672 @@ -1395,7 +1395,7 @@ static void cm_dup_req_handler(struct cm_work *work,
40673 struct ib_mad_send_buf *msg = NULL;
40674 int ret;
40675
40676 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40677 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40678 counter[CM_REQ_COUNTER]);
40679
40680 /* Quick state check to discard duplicate REQs. */
40681 @@ -1779,7 +1779,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
40682 if (!cm_id_priv)
40683 return;
40684
40685 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40686 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40687 counter[CM_REP_COUNTER]);
40688 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
40689 if (ret)
40690 @@ -1946,7 +1946,7 @@ static int cm_rtu_handler(struct cm_work *work)
40691 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
40692 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
40693 spin_unlock_irq(&cm_id_priv->lock);
40694 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40695 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40696 counter[CM_RTU_COUNTER]);
40697 goto out;
40698 }
40699 @@ -2129,7 +2129,7 @@ static int cm_dreq_handler(struct cm_work *work)
40700 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
40701 dreq_msg->local_comm_id);
40702 if (!cm_id_priv) {
40703 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40704 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40705 counter[CM_DREQ_COUNTER]);
40706 cm_issue_drep(work->port, work->mad_recv_wc);
40707 return -EINVAL;
40708 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work)
40709 case IB_CM_MRA_REP_RCVD:
40710 break;
40711 case IB_CM_TIMEWAIT:
40712 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40713 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40714 counter[CM_DREQ_COUNTER]);
40715 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
40716 goto unlock;
40717 @@ -2168,7 +2168,7 @@ static int cm_dreq_handler(struct cm_work *work)
40718 cm_free_msg(msg);
40719 goto deref;
40720 case IB_CM_DREQ_RCVD:
40721 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40722 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40723 counter[CM_DREQ_COUNTER]);
40724 goto unlock;
40725 default:
40726 @@ -2535,7 +2535,7 @@ static int cm_mra_handler(struct cm_work *work)
40727 ib_modify_mad(cm_id_priv->av.port->mad_agent,
40728 cm_id_priv->msg, timeout)) {
40729 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
40730 - atomic_long_inc(&work->port->
40731 + atomic_long_inc_unchecked(&work->port->
40732 counter_group[CM_RECV_DUPLICATES].
40733 counter[CM_MRA_COUNTER]);
40734 goto out;
40735 @@ -2544,7 +2544,7 @@ static int cm_mra_handler(struct cm_work *work)
40736 break;
40737 case IB_CM_MRA_REQ_RCVD:
40738 case IB_CM_MRA_REP_RCVD:
40739 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40740 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40741 counter[CM_MRA_COUNTER]);
40742 /* fall through */
40743 default:
40744 @@ -2706,7 +2706,7 @@ static int cm_lap_handler(struct cm_work *work)
40745 case IB_CM_LAP_IDLE:
40746 break;
40747 case IB_CM_MRA_LAP_SENT:
40748 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40749 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40750 counter[CM_LAP_COUNTER]);
40751 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
40752 goto unlock;
40753 @@ -2722,7 +2722,7 @@ static int cm_lap_handler(struct cm_work *work)
40754 cm_free_msg(msg);
40755 goto deref;
40756 case IB_CM_LAP_RCVD:
40757 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40758 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40759 counter[CM_LAP_COUNTER]);
40760 goto unlock;
40761 default:
40762 @@ -3006,7 +3006,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
40763 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
40764 if (cur_cm_id_priv) {
40765 spin_unlock_irq(&cm.lock);
40766 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
40767 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
40768 counter[CM_SIDR_REQ_COUNTER]);
40769 goto out; /* Duplicate message. */
40770 }
40771 @@ -3218,10 +3218,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
40772 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
40773 msg->retries = 1;
40774
40775 - atomic_long_add(1 + msg->retries,
40776 + atomic_long_add_unchecked(1 + msg->retries,
40777 &port->counter_group[CM_XMIT].counter[attr_index]);
40778 if (msg->retries)
40779 - atomic_long_add(msg->retries,
40780 + atomic_long_add_unchecked(msg->retries,
40781 &port->counter_group[CM_XMIT_RETRIES].
40782 counter[attr_index]);
40783
40784 @@ -3431,7 +3431,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
40785 }
40786
40787 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
40788 - atomic_long_inc(&port->counter_group[CM_RECV].
40789 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
40790 counter[attr_id - CM_ATTR_ID_OFFSET]);
40791
40792 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
40793 @@ -3636,7 +3636,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
40794 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
40795
40796 return sprintf(buf, "%ld\n",
40797 - atomic_long_read(&group->counter[cm_attr->index]));
40798 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
40799 }
40800
40801 static const struct sysfs_ops cm_counter_ops = {
40802 diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
40803 index 9f5ad7c..588cd84 100644
40804 --- a/drivers/infiniband/core/fmr_pool.c
40805 +++ b/drivers/infiniband/core/fmr_pool.c
40806 @@ -98,8 +98,8 @@ struct ib_fmr_pool {
40807
40808 struct task_struct *thread;
40809
40810 - atomic_t req_ser;
40811 - atomic_t flush_ser;
40812 + atomic_unchecked_t req_ser;
40813 + atomic_unchecked_t flush_ser;
40814
40815 wait_queue_head_t force_wait;
40816 };
40817 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
40818 struct ib_fmr_pool *pool = pool_ptr;
40819
40820 do {
40821 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
40822 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
40823 ib_fmr_batch_release(pool);
40824
40825 - atomic_inc(&pool->flush_ser);
40826 + atomic_inc_unchecked(&pool->flush_ser);
40827 wake_up_interruptible(&pool->force_wait);
40828
40829 if (pool->flush_function)
40830 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
40831 }
40832
40833 set_current_state(TASK_INTERRUPTIBLE);
40834 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
40835 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
40836 !kthread_should_stop())
40837 schedule();
40838 __set_current_state(TASK_RUNNING);
40839 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
40840 pool->dirty_watermark = params->dirty_watermark;
40841 pool->dirty_len = 0;
40842 spin_lock_init(&pool->pool_lock);
40843 - atomic_set(&pool->req_ser, 0);
40844 - atomic_set(&pool->flush_ser, 0);
40845 + atomic_set_unchecked(&pool->req_ser, 0);
40846 + atomic_set_unchecked(&pool->flush_ser, 0);
40847 init_waitqueue_head(&pool->force_wait);
40848
40849 pool->thread = kthread_run(ib_fmr_cleanup_thread,
40850 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
40851 }
40852 spin_unlock_irq(&pool->pool_lock);
40853
40854 - serial = atomic_inc_return(&pool->req_ser);
40855 + serial = atomic_inc_return_unchecked(&pool->req_ser);
40856 wake_up_process(pool->thread);
40857
40858 if (wait_event_interruptible(pool->force_wait,
40859 - atomic_read(&pool->flush_ser) - serial >= 0))
40860 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
40861 return -EINTR;
40862
40863 return 0;
40864 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
40865 } else {
40866 list_add_tail(&fmr->list, &pool->dirty_list);
40867 if (++pool->dirty_len >= pool->dirty_watermark) {
40868 - atomic_inc(&pool->req_ser);
40869 + atomic_inc_unchecked(&pool->req_ser);
40870 wake_up_process(pool->thread);
40871 }
40872 }
40873 diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
40874 index 4cb8eb2..146bf60 100644
40875 --- a/drivers/infiniband/hw/cxgb4/mem.c
40876 +++ b/drivers/infiniband/hw/cxgb4/mem.c
40877 @@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
40878 int err;
40879 struct fw_ri_tpte tpt;
40880 u32 stag_idx;
40881 - static atomic_t key;
40882 + static atomic_unchecked_t key;
40883
40884 if (c4iw_fatal_error(rdev))
40885 return -EIO;
40886 @@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
40887 if (rdev->stats.stag.cur > rdev->stats.stag.max)
40888 rdev->stats.stag.max = rdev->stats.stag.cur;
40889 mutex_unlock(&rdev->stats.lock);
40890 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
40891 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
40892 }
40893 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
40894 __func__, stag_state, type, pdid, stag_idx);
40895 diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
40896 index 79b3dbc..96e5fcc 100644
40897 --- a/drivers/infiniband/hw/ipath/ipath_rc.c
40898 +++ b/drivers/infiniband/hw/ipath/ipath_rc.c
40899 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
40900 struct ib_atomic_eth *ateth;
40901 struct ipath_ack_entry *e;
40902 u64 vaddr;
40903 - atomic64_t *maddr;
40904 + atomic64_unchecked_t *maddr;
40905 u64 sdata;
40906 u32 rkey;
40907 u8 next;
40908 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
40909 IB_ACCESS_REMOTE_ATOMIC)))
40910 goto nack_acc_unlck;
40911 /* Perform atomic OP and save result. */
40912 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
40913 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
40914 sdata = be64_to_cpu(ateth->swap_data);
40915 e = &qp->s_ack_queue[qp->r_head_ack_queue];
40916 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
40917 - (u64) atomic64_add_return(sdata, maddr) - sdata :
40918 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
40919 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
40920 be64_to_cpu(ateth->compare_data),
40921 sdata);
40922 diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
40923 index 1f95bba..9530f87 100644
40924 --- a/drivers/infiniband/hw/ipath/ipath_ruc.c
40925 +++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
40926 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
40927 unsigned long flags;
40928 struct ib_wc wc;
40929 u64 sdata;
40930 - atomic64_t *maddr;
40931 + atomic64_unchecked_t *maddr;
40932 enum ib_wc_status send_status;
40933
40934 /*
40935 @@ -382,11 +382,11 @@ again:
40936 IB_ACCESS_REMOTE_ATOMIC)))
40937 goto acc_err;
40938 /* Perform atomic OP and save result. */
40939 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
40940 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
40941 sdata = wqe->wr.wr.atomic.compare_add;
40942 *(u64 *) sqp->s_sge.sge.vaddr =
40943 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
40944 - (u64) atomic64_add_return(sdata, maddr) - sdata :
40945 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
40946 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
40947 sdata, wqe->wr.wr.atomic.swap);
40948 goto send_comp;
40949 diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
40950 index f2a3f48..673ec79 100644
40951 --- a/drivers/infiniband/hw/mlx4/mad.c
40952 +++ b/drivers/infiniband/hw/mlx4/mad.c
40953 @@ -98,7 +98,7 @@ __be64 mlx4_ib_gen_node_guid(void)
40954
40955 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
40956 {
40957 - return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
40958 + return cpu_to_be64(atomic_inc_return_unchecked(&ctx->tid)) |
40959 cpu_to_be64(0xff00000000000000LL);
40960 }
40961
40962 diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
40963 index 25b2cdf..099ff97 100644
40964 --- a/drivers/infiniband/hw/mlx4/mcg.c
40965 +++ b/drivers/infiniband/hw/mlx4/mcg.c
40966 @@ -1040,7 +1040,7 @@ int mlx4_ib_mcg_port_init(struct mlx4_ib_demux_ctx *ctx)
40967 {
40968 char name[20];
40969
40970 - atomic_set(&ctx->tid, 0);
40971 + atomic_set_unchecked(&ctx->tid, 0);
40972 sprintf(name, "mlx4_ib_mcg%d", ctx->port);
40973 ctx->mcg_wq = create_singlethread_workqueue(name);
40974 if (!ctx->mcg_wq)
40975 diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
40976 index 036b663..c9a8c73 100644
40977 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
40978 +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
40979 @@ -404,7 +404,7 @@ struct mlx4_ib_demux_ctx {
40980 struct list_head mcg_mgid0_list;
40981 struct workqueue_struct *mcg_wq;
40982 struct mlx4_ib_demux_pv_ctx **tun;
40983 - atomic_t tid;
40984 + atomic_unchecked_t tid;
40985 int flushing; /* flushing the work queue */
40986 };
40987
40988 diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
40989 index 9d3e5c1..6f166df 100644
40990 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c
40991 +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
40992 @@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
40993 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
40994 }
40995
40996 -int mthca_QUERY_FW(struct mthca_dev *dev)
40997 +int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
40998 {
40999 struct mthca_mailbox *mailbox;
41000 u32 *outbox;
41001 @@ -1612,7 +1612,7 @@ int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41002 CMD_TIME_CLASS_B);
41003 }
41004
41005 -int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41006 +int __intentional_overflow(-1) mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41007 int num_mtt)
41008 {
41009 return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT,
41010 @@ -1634,7 +1634,7 @@ int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap,
41011 0, CMD_MAP_EQ, CMD_TIME_CLASS_B);
41012 }
41013
41014 -int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41015 +int __intentional_overflow(-1) mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox,
41016 int eq_num)
41017 {
41018 return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ,
41019 @@ -1857,7 +1857,7 @@ int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn)
41020 CMD_TIME_CLASS_B);
41021 }
41022
41023 -int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
41024 +int __intentional_overflow(-1) mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey,
41025 int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
41026 void *in_mad, void *response_mad)
41027 {
41028 diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
41029 index 87897b9..7e79542 100644
41030 --- a/drivers/infiniband/hw/mthca/mthca_main.c
41031 +++ b/drivers/infiniband/hw/mthca/mthca_main.c
41032 @@ -692,7 +692,7 @@ err_close:
41033 return err;
41034 }
41035
41036 -static int mthca_setup_hca(struct mthca_dev *dev)
41037 +static int __intentional_overflow(-1) mthca_setup_hca(struct mthca_dev *dev)
41038 {
41039 int err;
41040
41041 diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
41042 index ed9a989..6aa5dc2 100644
41043 --- a/drivers/infiniband/hw/mthca/mthca_mr.c
41044 +++ b/drivers/infiniband/hw/mthca/mthca_mr.c
41045 @@ -81,7 +81,7 @@ struct mthca_mpt_entry {
41046 * through the bitmaps)
41047 */
41048
41049 -static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
41050 +static u32 __intentional_overflow(-1) mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
41051 {
41052 int o;
41053 int m;
41054 @@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
41055 return key;
41056 }
41057
41058 -int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
41059 +int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
41060 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
41061 {
41062 struct mthca_mailbox *mailbox;
41063 @@ -516,7 +516,7 @@ int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
41064 return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
41065 }
41066
41067 -int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
41068 +int __intentional_overflow(-1) mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
41069 u64 *buffer_list, int buffer_size_shift,
41070 int list_len, u64 iova, u64 total_size,
41071 u32 access, struct mthca_mr *mr)
41072 diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
41073 index 5b71d43..35a9e14 100644
41074 --- a/drivers/infiniband/hw/mthca/mthca_provider.c
41075 +++ b/drivers/infiniband/hw/mthca/mthca_provider.c
41076 @@ -763,7 +763,7 @@ unlock:
41077 return 0;
41078 }
41079
41080 -static int mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
41081 +static int __intentional_overflow(-1) mthca_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
41082 {
41083 struct mthca_dev *dev = to_mdev(ibcq->device);
41084 struct mthca_cq *cq = to_mcq(ibcq);
41085 diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
41086 index 4291410..d2ab1fb 100644
41087 --- a/drivers/infiniband/hw/nes/nes.c
41088 +++ b/drivers/infiniband/hw/nes/nes.c
41089 @@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
41090 LIST_HEAD(nes_adapter_list);
41091 static LIST_HEAD(nes_dev_list);
41092
41093 -atomic_t qps_destroyed;
41094 +atomic_unchecked_t qps_destroyed;
41095
41096 static unsigned int ee_flsh_adapter;
41097 static unsigned int sysfs_nonidx_addr;
41098 @@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
41099 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
41100 struct nes_adapter *nesadapter = nesdev->nesadapter;
41101
41102 - atomic_inc(&qps_destroyed);
41103 + atomic_inc_unchecked(&qps_destroyed);
41104
41105 /* Free the control structures */
41106
41107 diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
41108 index 33cc589..3bd6538 100644
41109 --- a/drivers/infiniband/hw/nes/nes.h
41110 +++ b/drivers/infiniband/hw/nes/nes.h
41111 @@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
41112 extern unsigned int wqm_quanta;
41113 extern struct list_head nes_adapter_list;
41114
41115 -extern atomic_t cm_connects;
41116 -extern atomic_t cm_accepts;
41117 -extern atomic_t cm_disconnects;
41118 -extern atomic_t cm_closes;
41119 -extern atomic_t cm_connecteds;
41120 -extern atomic_t cm_connect_reqs;
41121 -extern atomic_t cm_rejects;
41122 -extern atomic_t mod_qp_timouts;
41123 -extern atomic_t qps_created;
41124 -extern atomic_t qps_destroyed;
41125 -extern atomic_t sw_qps_destroyed;
41126 +extern atomic_unchecked_t cm_connects;
41127 +extern atomic_unchecked_t cm_accepts;
41128 +extern atomic_unchecked_t cm_disconnects;
41129 +extern atomic_unchecked_t cm_closes;
41130 +extern atomic_unchecked_t cm_connecteds;
41131 +extern atomic_unchecked_t cm_connect_reqs;
41132 +extern atomic_unchecked_t cm_rejects;
41133 +extern atomic_unchecked_t mod_qp_timouts;
41134 +extern atomic_unchecked_t qps_created;
41135 +extern atomic_unchecked_t qps_destroyed;
41136 +extern atomic_unchecked_t sw_qps_destroyed;
41137 extern u32 mh_detected;
41138 extern u32 mh_pauses_sent;
41139 extern u32 cm_packets_sent;
41140 @@ -196,16 +196,16 @@ extern u32 cm_packets_created;
41141 extern u32 cm_packets_received;
41142 extern u32 cm_packets_dropped;
41143 extern u32 cm_packets_retrans;
41144 -extern atomic_t cm_listens_created;
41145 -extern atomic_t cm_listens_destroyed;
41146 +extern atomic_unchecked_t cm_listens_created;
41147 +extern atomic_unchecked_t cm_listens_destroyed;
41148 extern u32 cm_backlog_drops;
41149 -extern atomic_t cm_loopbacks;
41150 -extern atomic_t cm_nodes_created;
41151 -extern atomic_t cm_nodes_destroyed;
41152 -extern atomic_t cm_accel_dropped_pkts;
41153 -extern atomic_t cm_resets_recvd;
41154 -extern atomic_t pau_qps_created;
41155 -extern atomic_t pau_qps_destroyed;
41156 +extern atomic_unchecked_t cm_loopbacks;
41157 +extern atomic_unchecked_t cm_nodes_created;
41158 +extern atomic_unchecked_t cm_nodes_destroyed;
41159 +extern atomic_unchecked_t cm_accel_dropped_pkts;
41160 +extern atomic_unchecked_t cm_resets_recvd;
41161 +extern atomic_unchecked_t pau_qps_created;
41162 +extern atomic_unchecked_t pau_qps_destroyed;
41163
41164 extern u32 int_mod_timer_init;
41165 extern u32 int_mod_cq_depth_256;
41166 diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
41167 index 6b29249..57081dd 100644
41168 --- a/drivers/infiniband/hw/nes/nes_cm.c
41169 +++ b/drivers/infiniband/hw/nes/nes_cm.c
41170 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
41171 u32 cm_packets_retrans;
41172 u32 cm_packets_created;
41173 u32 cm_packets_received;
41174 -atomic_t cm_listens_created;
41175 -atomic_t cm_listens_destroyed;
41176 +atomic_unchecked_t cm_listens_created;
41177 +atomic_unchecked_t cm_listens_destroyed;
41178 u32 cm_backlog_drops;
41179 -atomic_t cm_loopbacks;
41180 -atomic_t cm_nodes_created;
41181 -atomic_t cm_nodes_destroyed;
41182 -atomic_t cm_accel_dropped_pkts;
41183 -atomic_t cm_resets_recvd;
41184 +atomic_unchecked_t cm_loopbacks;
41185 +atomic_unchecked_t cm_nodes_created;
41186 +atomic_unchecked_t cm_nodes_destroyed;
41187 +atomic_unchecked_t cm_accel_dropped_pkts;
41188 +atomic_unchecked_t cm_resets_recvd;
41189
41190 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
41191 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
41192 @@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
41193
41194 static struct nes_cm_core *g_cm_core;
41195
41196 -atomic_t cm_connects;
41197 -atomic_t cm_accepts;
41198 -atomic_t cm_disconnects;
41199 -atomic_t cm_closes;
41200 -atomic_t cm_connecteds;
41201 -atomic_t cm_connect_reqs;
41202 -atomic_t cm_rejects;
41203 +atomic_unchecked_t cm_connects;
41204 +atomic_unchecked_t cm_accepts;
41205 +atomic_unchecked_t cm_disconnects;
41206 +atomic_unchecked_t cm_closes;
41207 +atomic_unchecked_t cm_connecteds;
41208 +atomic_unchecked_t cm_connect_reqs;
41209 +atomic_unchecked_t cm_rejects;
41210
41211 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
41212 {
41213 @@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
41214 kfree(listener);
41215 listener = NULL;
41216 ret = 0;
41217 - atomic_inc(&cm_listens_destroyed);
41218 + atomic_inc_unchecked(&cm_listens_destroyed);
41219 } else {
41220 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
41221 }
41222 @@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
41223 cm_node->rem_mac);
41224
41225 add_hte_node(cm_core, cm_node);
41226 - atomic_inc(&cm_nodes_created);
41227 + atomic_inc_unchecked(&cm_nodes_created);
41228
41229 return cm_node;
41230 }
41231 @@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
41232 }
41233
41234 atomic_dec(&cm_core->node_cnt);
41235 - atomic_inc(&cm_nodes_destroyed);
41236 + atomic_inc_unchecked(&cm_nodes_destroyed);
41237 nesqp = cm_node->nesqp;
41238 if (nesqp) {
41239 nesqp->cm_node = NULL;
41240 @@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
41241
41242 static void drop_packet(struct sk_buff *skb)
41243 {
41244 - atomic_inc(&cm_accel_dropped_pkts);
41245 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
41246 dev_kfree_skb_any(skb);
41247 }
41248
41249 @@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
41250 {
41251
41252 int reset = 0; /* whether to send reset in case of err.. */
41253 - atomic_inc(&cm_resets_recvd);
41254 + atomic_inc_unchecked(&cm_resets_recvd);
41255 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
41256 " refcnt=%d\n", cm_node, cm_node->state,
41257 atomic_read(&cm_node->ref_count));
41258 @@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
41259 rem_ref_cm_node(cm_node->cm_core, cm_node);
41260 return NULL;
41261 }
41262 - atomic_inc(&cm_loopbacks);
41263 + atomic_inc_unchecked(&cm_loopbacks);
41264 loopbackremotenode->loopbackpartner = cm_node;
41265 loopbackremotenode->tcp_cntxt.rcv_wscale =
41266 NES_CM_DEFAULT_RCV_WND_SCALE;
41267 @@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
41268 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
41269 else {
41270 rem_ref_cm_node(cm_core, cm_node);
41271 - atomic_inc(&cm_accel_dropped_pkts);
41272 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
41273 dev_kfree_skb_any(skb);
41274 }
41275 break;
41276 @@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
41277
41278 if ((cm_id) && (cm_id->event_handler)) {
41279 if (issue_disconn) {
41280 - atomic_inc(&cm_disconnects);
41281 + atomic_inc_unchecked(&cm_disconnects);
41282 cm_event.event = IW_CM_EVENT_DISCONNECT;
41283 cm_event.status = disconn_status;
41284 cm_event.local_addr = cm_id->local_addr;
41285 @@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
41286 }
41287
41288 if (issue_close) {
41289 - atomic_inc(&cm_closes);
41290 + atomic_inc_unchecked(&cm_closes);
41291 nes_disconnect(nesqp, 1);
41292
41293 cm_id->provider_data = nesqp;
41294 @@ -3035,7 +3035,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
41295
41296 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
41297 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
41298 - atomic_inc(&cm_accepts);
41299 + atomic_inc_unchecked(&cm_accepts);
41300
41301 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
41302 netdev_refcnt_read(nesvnic->netdev));
41303 @@ -3224,7 +3224,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
41304 struct nes_cm_core *cm_core;
41305 u8 *start_buff;
41306
41307 - atomic_inc(&cm_rejects);
41308 + atomic_inc_unchecked(&cm_rejects);
41309 cm_node = (struct nes_cm_node *)cm_id->provider_data;
41310 loopback = cm_node->loopbackpartner;
41311 cm_core = cm_node->cm_core;
41312 @@ -3286,7 +3286,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
41313 ntohs(raddr->sin_port), ntohl(laddr->sin_addr.s_addr),
41314 ntohs(laddr->sin_port));
41315
41316 - atomic_inc(&cm_connects);
41317 + atomic_inc_unchecked(&cm_connects);
41318 nesqp->active_conn = 1;
41319
41320 /* cache the cm_id in the qp */
41321 @@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
41322 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
41323 return err;
41324 }
41325 - atomic_inc(&cm_listens_created);
41326 + atomic_inc_unchecked(&cm_listens_created);
41327 }
41328
41329 cm_id->add_ref(cm_id);
41330 @@ -3505,7 +3505,7 @@ static void cm_event_connected(struct nes_cm_event *event)
41331
41332 if (nesqp->destroyed)
41333 return;
41334 - atomic_inc(&cm_connecteds);
41335 + atomic_inc_unchecked(&cm_connecteds);
41336 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
41337 " local port 0x%04X. jiffies = %lu.\n",
41338 nesqp->hwqp.qp_id, ntohl(raddr->sin_addr.s_addr),
41339 @@ -3686,7 +3686,7 @@ static void cm_event_reset(struct nes_cm_event *event)
41340
41341 cm_id->add_ref(cm_id);
41342 ret = cm_id->event_handler(cm_id, &cm_event);
41343 - atomic_inc(&cm_closes);
41344 + atomic_inc_unchecked(&cm_closes);
41345 cm_event.event = IW_CM_EVENT_CLOSE;
41346 cm_event.status = 0;
41347 cm_event.provider_data = cm_id->provider_data;
41348 @@ -3726,7 +3726,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
41349 return;
41350 cm_id = cm_node->cm_id;
41351
41352 - atomic_inc(&cm_connect_reqs);
41353 + atomic_inc_unchecked(&cm_connect_reqs);
41354 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
41355 cm_node, cm_id, jiffies);
41356
41357 @@ -3770,7 +3770,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
41358 return;
41359 cm_id = cm_node->cm_id;
41360
41361 - atomic_inc(&cm_connect_reqs);
41362 + atomic_inc_unchecked(&cm_connect_reqs);
41363 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
41364 cm_node, cm_id, jiffies);
41365
41366 diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
41367 index 4166452..fc952c3 100644
41368 --- a/drivers/infiniband/hw/nes/nes_mgt.c
41369 +++ b/drivers/infiniband/hw/nes/nes_mgt.c
41370 @@ -40,8 +40,8 @@
41371 #include "nes.h"
41372 #include "nes_mgt.h"
41373
41374 -atomic_t pau_qps_created;
41375 -atomic_t pau_qps_destroyed;
41376 +atomic_unchecked_t pau_qps_created;
41377 +atomic_unchecked_t pau_qps_destroyed;
41378
41379 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
41380 {
41381 @@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
41382 {
41383 struct sk_buff *skb;
41384 unsigned long flags;
41385 - atomic_inc(&pau_qps_destroyed);
41386 + atomic_inc_unchecked(&pau_qps_destroyed);
41387
41388 /* Free packets that have not yet been forwarded */
41389 /* Lock is acquired by skb_dequeue when removing the skb */
41390 @@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
41391 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
41392 skb_queue_head_init(&nesqp->pau_list);
41393 spin_lock_init(&nesqp->pau_lock);
41394 - atomic_inc(&pau_qps_created);
41395 + atomic_inc_unchecked(&pau_qps_created);
41396 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
41397 }
41398
41399 diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
41400 index 49eb511..a774366 100644
41401 --- a/drivers/infiniband/hw/nes/nes_nic.c
41402 +++ b/drivers/infiniband/hw/nes/nes_nic.c
41403 @@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
41404 target_stat_values[++index] = mh_detected;
41405 target_stat_values[++index] = mh_pauses_sent;
41406 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
41407 - target_stat_values[++index] = atomic_read(&cm_connects);
41408 - target_stat_values[++index] = atomic_read(&cm_accepts);
41409 - target_stat_values[++index] = atomic_read(&cm_disconnects);
41410 - target_stat_values[++index] = atomic_read(&cm_connecteds);
41411 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
41412 - target_stat_values[++index] = atomic_read(&cm_rejects);
41413 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
41414 - target_stat_values[++index] = atomic_read(&qps_created);
41415 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
41416 - target_stat_values[++index] = atomic_read(&qps_destroyed);
41417 - target_stat_values[++index] = atomic_read(&cm_closes);
41418 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
41419 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
41420 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
41421 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
41422 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
41423 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
41424 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
41425 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
41426 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
41427 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
41428 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
41429 target_stat_values[++index] = cm_packets_sent;
41430 target_stat_values[++index] = cm_packets_bounced;
41431 target_stat_values[++index] = cm_packets_created;
41432 target_stat_values[++index] = cm_packets_received;
41433 target_stat_values[++index] = cm_packets_dropped;
41434 target_stat_values[++index] = cm_packets_retrans;
41435 - target_stat_values[++index] = atomic_read(&cm_listens_created);
41436 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
41437 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
41438 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
41439 target_stat_values[++index] = cm_backlog_drops;
41440 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
41441 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
41442 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
41443 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
41444 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
41445 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
41446 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
41447 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
41448 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
41449 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
41450 target_stat_values[++index] = nesadapter->free_4kpbl;
41451 target_stat_values[++index] = nesadapter->free_256pbl;
41452 target_stat_values[++index] = int_mod_timer_init;
41453 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
41454 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
41455 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
41456 - target_stat_values[++index] = atomic_read(&pau_qps_created);
41457 - target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
41458 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
41459 + target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
41460 }
41461
41462 /**
41463 diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
41464 index 5b53ca5..443da3c 100644
41465 --- a/drivers/infiniband/hw/nes/nes_verbs.c
41466 +++ b/drivers/infiniband/hw/nes/nes_verbs.c
41467 @@ -46,9 +46,9 @@
41468
41469 #include <rdma/ib_umem.h>
41470
41471 -atomic_t mod_qp_timouts;
41472 -atomic_t qps_created;
41473 -atomic_t sw_qps_destroyed;
41474 +atomic_unchecked_t mod_qp_timouts;
41475 +atomic_unchecked_t qps_created;
41476 +atomic_unchecked_t sw_qps_destroyed;
41477
41478 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
41479
41480 @@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
41481 if (init_attr->create_flags)
41482 return ERR_PTR(-EINVAL);
41483
41484 - atomic_inc(&qps_created);
41485 + atomic_inc_unchecked(&qps_created);
41486 switch (init_attr->qp_type) {
41487 case IB_QPT_RC:
41488 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
41489 @@ -1466,7 +1466,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
41490 struct iw_cm_event cm_event;
41491 int ret = 0;
41492
41493 - atomic_inc(&sw_qps_destroyed);
41494 + atomic_inc_unchecked(&sw_qps_destroyed);
41495 nesqp->destroyed = 1;
41496
41497 /* Blow away the connection if it exists. */
41498 diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
41499 index 1946101..09766d2 100644
41500 --- a/drivers/infiniband/hw/qib/qib.h
41501 +++ b/drivers/infiniband/hw/qib/qib.h
41502 @@ -52,6 +52,7 @@
41503 #include <linux/kref.h>
41504 #include <linux/sched.h>
41505 #include <linux/kthread.h>
41506 +#include <linux/slab.h>
41507
41508 #include "qib_common.h"
41509 #include "qib_verbs.h"
41510 diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
41511 index 922a7fe..bb035db 100644
41512 --- a/drivers/input/gameport/gameport.c
41513 +++ b/drivers/input/gameport/gameport.c
41514 @@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
41515 */
41516 static void gameport_init_port(struct gameport *gameport)
41517 {
41518 - static atomic_t gameport_no = ATOMIC_INIT(0);
41519 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
41520
41521 __module_get(THIS_MODULE);
41522
41523 mutex_init(&gameport->drv_mutex);
41524 device_initialize(&gameport->dev);
41525 dev_set_name(&gameport->dev, "gameport%lu",
41526 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
41527 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
41528 gameport->dev.bus = &gameport_bus;
41529 gameport->dev.release = gameport_release_port;
41530 if (gameport->parent)
41531 diff --git a/drivers/input/input.c b/drivers/input/input.c
41532 index e75d015..57d1c28 100644
41533 --- a/drivers/input/input.c
41534 +++ b/drivers/input/input.c
41535 @@ -1734,7 +1734,7 @@ EXPORT_SYMBOL_GPL(input_class);
41536 */
41537 struct input_dev *input_allocate_device(void)
41538 {
41539 - static atomic_t input_no = ATOMIC_INIT(0);
41540 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
41541 struct input_dev *dev;
41542
41543 dev = kzalloc(sizeof(struct input_dev), GFP_KERNEL);
41544 @@ -1749,7 +1749,7 @@ struct input_dev *input_allocate_device(void)
41545 INIT_LIST_HEAD(&dev->node);
41546
41547 dev_set_name(&dev->dev, "input%ld",
41548 - (unsigned long) atomic_inc_return(&input_no) - 1);
41549 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
41550
41551 __module_get(THIS_MODULE);
41552 }
41553 diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
41554 index 04c69af..5f92d00 100644
41555 --- a/drivers/input/joystick/sidewinder.c
41556 +++ b/drivers/input/joystick/sidewinder.c
41557 @@ -30,6 +30,7 @@
41558 #include <linux/kernel.h>
41559 #include <linux/module.h>
41560 #include <linux/slab.h>
41561 +#include <linux/sched.h>
41562 #include <linux/init.h>
41563 #include <linux/input.h>
41564 #include <linux/gameport.h>
41565 diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
41566 index 75e3b10..fb390fd 100644
41567 --- a/drivers/input/joystick/xpad.c
41568 +++ b/drivers/input/joystick/xpad.c
41569 @@ -736,7 +736,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
41570
41571 static int xpad_led_probe(struct usb_xpad *xpad)
41572 {
41573 - static atomic_t led_seq = ATOMIC_INIT(0);
41574 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
41575 long led_no;
41576 struct xpad_led *led;
41577 struct led_classdev *led_cdev;
41578 @@ -749,7 +749,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
41579 if (!led)
41580 return -ENOMEM;
41581
41582 - led_no = (long)atomic_inc_return(&led_seq) - 1;
41583 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
41584
41585 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
41586 led->xpad = xpad;
41587 diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
41588 index e204f26..8459f15 100644
41589 --- a/drivers/input/misc/ims-pcu.c
41590 +++ b/drivers/input/misc/ims-pcu.c
41591 @@ -1621,7 +1621,7 @@ static int ims_pcu_identify_type(struct ims_pcu *pcu, u8 *device_id)
41592
41593 static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
41594 {
41595 - static atomic_t device_no = ATOMIC_INIT(0);
41596 + static atomic_unchecked_t device_no = ATOMIC_INIT(0);
41597
41598 const struct ims_pcu_device_info *info;
41599 u8 device_id;
41600 @@ -1653,7 +1653,7 @@ static int ims_pcu_init_application_mode(struct ims_pcu *pcu)
41601 }
41602
41603 /* Device appears to be operable, complete initialization */
41604 - pcu->device_no = atomic_inc_return(&device_no) - 1;
41605 + pcu->device_no = atomic_inc_return_unchecked(&device_no) - 1;
41606
41607 error = ims_pcu_setup_backlight(pcu);
41608 if (error)
41609 diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
41610 index 2f0b39d..7370f13 100644
41611 --- a/drivers/input/mouse/psmouse.h
41612 +++ b/drivers/input/mouse/psmouse.h
41613 @@ -116,7 +116,7 @@ struct psmouse_attribute {
41614 ssize_t (*set)(struct psmouse *psmouse, void *data,
41615 const char *buf, size_t count);
41616 bool protect;
41617 -};
41618 +} __do_const;
41619 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
41620
41621 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
41622 diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
41623 index 4c842c3..590b0bf 100644
41624 --- a/drivers/input/mousedev.c
41625 +++ b/drivers/input/mousedev.c
41626 @@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
41627
41628 spin_unlock_irq(&client->packet_lock);
41629
41630 - if (copy_to_user(buffer, data, count))
41631 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
41632 return -EFAULT;
41633
41634 return count;
41635 diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
41636 index 2b56855..5a55837 100644
41637 --- a/drivers/input/serio/serio.c
41638 +++ b/drivers/input/serio/serio.c
41639 @@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
41640 */
41641 static void serio_init_port(struct serio *serio)
41642 {
41643 - static atomic_t serio_no = ATOMIC_INIT(0);
41644 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
41645
41646 __module_get(THIS_MODULE);
41647
41648 @@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
41649 mutex_init(&serio->drv_mutex);
41650 device_initialize(&serio->dev);
41651 dev_set_name(&serio->dev, "serio%ld",
41652 - (long)atomic_inc_return(&serio_no) - 1);
41653 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
41654 serio->dev.bus = &serio_bus;
41655 serio->dev.release = serio_release_port;
41656 serio->dev.groups = serio_device_attr_groups;
41657 diff --git a/drivers/input/serio/serio_raw.c b/drivers/input/serio/serio_raw.c
41658 index 59df2e7..8f1cafb 100644
41659 --- a/drivers/input/serio/serio_raw.c
41660 +++ b/drivers/input/serio/serio_raw.c
41661 @@ -293,7 +293,7 @@ static irqreturn_t serio_raw_interrupt(struct serio *serio, unsigned char data,
41662
41663 static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
41664 {
41665 - static atomic_t serio_raw_no = ATOMIC_INIT(0);
41666 + static atomic_unchecked_t serio_raw_no = ATOMIC_INIT(0);
41667 struct serio_raw *serio_raw;
41668 int err;
41669
41670 @@ -304,7 +304,7 @@ static int serio_raw_connect(struct serio *serio, struct serio_driver *drv)
41671 }
41672
41673 snprintf(serio_raw->name, sizeof(serio_raw->name),
41674 - "serio_raw%ld", (long)atomic_inc_return(&serio_raw_no) - 1);
41675 + "serio_raw%ld", (long)atomic_inc_return_unchecked(&serio_raw_no) - 1);
41676 kref_init(&serio_raw->kref);
41677 INIT_LIST_HEAD(&serio_raw->client_list);
41678 init_waitqueue_head(&serio_raw->wait);
41679 diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
41680 index fbe9ca7..dbee61d 100644
41681 --- a/drivers/iommu/iommu.c
41682 +++ b/drivers/iommu/iommu.c
41683 @@ -583,7 +583,7 @@ static struct notifier_block iommu_bus_nb = {
41684 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
41685 {
41686 bus_register_notifier(bus, &iommu_bus_nb);
41687 - bus_for_each_dev(bus, NULL, ops, add_iommu_group);
41688 + bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
41689 }
41690
41691 /**
41692 diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
41693 index 39f81ae..2660096 100644
41694 --- a/drivers/iommu/irq_remapping.c
41695 +++ b/drivers/iommu/irq_remapping.c
41696 @@ -356,7 +356,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
41697 void panic_if_irq_remap(const char *msg)
41698 {
41699 if (irq_remapping_enabled)
41700 - panic(msg);
41701 + panic("%s", msg);
41702 }
41703
41704 static void ir_ack_apic_edge(struct irq_data *data)
41705 @@ -377,10 +377,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
41706
41707 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
41708 {
41709 - chip->irq_print_chip = ir_print_prefix;
41710 - chip->irq_ack = ir_ack_apic_edge;
41711 - chip->irq_eoi = ir_ack_apic_level;
41712 - chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
41713 + pax_open_kernel();
41714 + *(void **)&chip->irq_print_chip = ir_print_prefix;
41715 + *(void **)&chip->irq_ack = ir_ack_apic_edge;
41716 + *(void **)&chip->irq_eoi = ir_ack_apic_level;
41717 + *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
41718 + pax_close_kernel();
41719 }
41720
41721 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
41722 diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
41723 index d0e9480..d2b6340 100644
41724 --- a/drivers/irqchip/irq-gic.c
41725 +++ b/drivers/irqchip/irq-gic.c
41726 @@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
41727 * Supported arch specific GIC irq extension.
41728 * Default make them NULL.
41729 */
41730 -struct irq_chip gic_arch_extn = {
41731 +irq_chip_no_const gic_arch_extn = {
41732 .irq_eoi = NULL,
41733 .irq_mask = NULL,
41734 .irq_unmask = NULL,
41735 @@ -333,7 +333,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
41736 chained_irq_exit(chip, desc);
41737 }
41738
41739 -static struct irq_chip gic_chip = {
41740 +static irq_chip_no_const gic_chip __read_only = {
41741 .name = "GIC",
41742 .irq_mask = gic_mask_irq,
41743 .irq_unmask = gic_unmask_irq,
41744 diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
41745 index ac6f72b..81150f2 100644
41746 --- a/drivers/isdn/capi/capi.c
41747 +++ b/drivers/isdn/capi/capi.c
41748 @@ -81,8 +81,8 @@ struct capiminor {
41749
41750 struct capi20_appl *ap;
41751 u32 ncci;
41752 - atomic_t datahandle;
41753 - atomic_t msgid;
41754 + atomic_unchecked_t datahandle;
41755 + atomic_unchecked_t msgid;
41756
41757 struct tty_port port;
41758 int ttyinstop;
41759 @@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
41760 capimsg_setu16(s, 2, mp->ap->applid);
41761 capimsg_setu8 (s, 4, CAPI_DATA_B3);
41762 capimsg_setu8 (s, 5, CAPI_RESP);
41763 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
41764 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
41765 capimsg_setu32(s, 8, mp->ncci);
41766 capimsg_setu16(s, 12, datahandle);
41767 }
41768 @@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
41769 mp->outbytes -= len;
41770 spin_unlock_bh(&mp->outlock);
41771
41772 - datahandle = atomic_inc_return(&mp->datahandle);
41773 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
41774 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
41775 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
41776 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
41777 capimsg_setu16(skb->data, 2, mp->ap->applid);
41778 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
41779 capimsg_setu8 (skb->data, 5, CAPI_REQ);
41780 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
41781 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
41782 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
41783 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
41784 capimsg_setu16(skb->data, 16, len); /* Data length */
41785 diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
41786 index 600c79b..3752bab 100644
41787 --- a/drivers/isdn/gigaset/interface.c
41788 +++ b/drivers/isdn/gigaset/interface.c
41789 @@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
41790 }
41791 tty->driver_data = cs;
41792
41793 - ++cs->port.count;
41794 + atomic_inc(&cs->port.count);
41795
41796 - if (cs->port.count == 1) {
41797 + if (atomic_read(&cs->port.count) == 1) {
41798 tty_port_tty_set(&cs->port, tty);
41799 cs->port.low_latency = 1;
41800 }
41801 @@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
41802
41803 if (!cs->connected)
41804 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
41805 - else if (!cs->port.count)
41806 + else if (!atomic_read(&cs->port.count))
41807 dev_warn(cs->dev, "%s: device not opened\n", __func__);
41808 - else if (!--cs->port.count)
41809 + else if (!atomic_dec_return(&cs->port.count))
41810 tty_port_tty_set(&cs->port, NULL);
41811
41812 mutex_unlock(&cs->mutex);
41813 diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
41814 index d0a41cb..f0cdb8c 100644
41815 --- a/drivers/isdn/gigaset/usb-gigaset.c
41816 +++ b/drivers/isdn/gigaset/usb-gigaset.c
41817 @@ -547,7 +547,7 @@ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6])
41818 gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf);
41819 memcpy(cs->hw.usb->bchars, buf, 6);
41820 return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41,
41821 - 0, 0, &buf, 6, 2000);
41822 + 0, 0, buf, 6, 2000);
41823 }
41824
41825 static void gigaset_freebcshw(struct bc_state *bcs)
41826 diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
41827 index 4d9b195..455075c 100644
41828 --- a/drivers/isdn/hardware/avm/b1.c
41829 +++ b/drivers/isdn/hardware/avm/b1.c
41830 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
41831 }
41832 if (left) {
41833 if (t4file->user) {
41834 - if (copy_from_user(buf, dp, left))
41835 + if (left > sizeof buf || copy_from_user(buf, dp, left))
41836 return -EFAULT;
41837 } else {
41838 memcpy(buf, dp, left);
41839 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
41840 }
41841 if (left) {
41842 if (config->user) {
41843 - if (copy_from_user(buf, dp, left))
41844 + if (left > sizeof buf || copy_from_user(buf, dp, left))
41845 return -EFAULT;
41846 } else {
41847 memcpy(buf, dp, left);
41848 diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
41849 index 9bb12ba..d4262f7 100644
41850 --- a/drivers/isdn/i4l/isdn_common.c
41851 +++ b/drivers/isdn/i4l/isdn_common.c
41852 @@ -1651,6 +1651,8 @@ isdn_ioctl(struct file *file, uint cmd, ulong arg)
41853 } else
41854 return -EINVAL;
41855 case IIOCDBGVAR:
41856 + if (!capable(CAP_SYS_RAWIO))
41857 + return -EPERM;
41858 if (arg) {
41859 if (copy_to_user(argp, &dev, sizeof(ulong)))
41860 return -EFAULT;
41861 diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
41862 index 3c5f249..5fac4d0 100644
41863 --- a/drivers/isdn/i4l/isdn_tty.c
41864 +++ b/drivers/isdn/i4l/isdn_tty.c
41865 @@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
41866
41867 #ifdef ISDN_DEBUG_MODEM_OPEN
41868 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
41869 - port->count);
41870 + atomic_read(&port->count));
41871 #endif
41872 - port->count++;
41873 + atomic_inc(&port->count);
41874 port->tty = tty;
41875 /*
41876 * Start up serial port
41877 @@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
41878 #endif
41879 return;
41880 }
41881 - if ((tty->count == 1) && (port->count != 1)) {
41882 + if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
41883 /*
41884 * Uh, oh. tty->count is 1, which means that the tty
41885 * structure will be freed. Info->count should always
41886 @@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
41887 * serial port won't be shutdown.
41888 */
41889 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
41890 - "info->count is %d\n", port->count);
41891 - port->count = 1;
41892 + "info->count is %d\n", atomic_read(&port->count));
41893 + atomic_set(&port->count, 1);
41894 }
41895 - if (--port->count < 0) {
41896 + if (atomic_dec_return(&port->count) < 0) {
41897 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
41898 - info->line, port->count);
41899 - port->count = 0;
41900 + info->line, atomic_read(&port->count));
41901 + atomic_set(&port->count, 0);
41902 }
41903 - if (port->count) {
41904 + if (atomic_read(&port->count)) {
41905 #ifdef ISDN_DEBUG_MODEM_OPEN
41906 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
41907 #endif
41908 @@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
41909 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
41910 return;
41911 isdn_tty_shutdown(info);
41912 - port->count = 0;
41913 + atomic_set(&port->count, 0);
41914 port->flags &= ~ASYNC_NORMAL_ACTIVE;
41915 port->tty = NULL;
41916 wake_up_interruptible(&port->open_wait);
41917 @@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
41918 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
41919 modem_info *info = &dev->mdm.info[i];
41920
41921 - if (info->port.count == 0)
41922 + if (atomic_read(&info->port.count) == 0)
41923 continue;
41924 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
41925 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
41926 diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
41927 index e74df7c..03a03ba 100644
41928 --- a/drivers/isdn/icn/icn.c
41929 +++ b/drivers/isdn/icn/icn.c
41930 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
41931 if (count > len)
41932 count = len;
41933 if (user) {
41934 - if (copy_from_user(msg, buf, count))
41935 + if (count > sizeof msg || copy_from_user(msg, buf, count))
41936 return -EFAULT;
41937 } else
41938 memcpy(msg, buf, count);
41939 diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c
41940 index a4f05c5..1433bc5 100644
41941 --- a/drivers/isdn/mISDN/dsp_cmx.c
41942 +++ b/drivers/isdn/mISDN/dsp_cmx.c
41943 @@ -1628,7 +1628,7 @@ unsigned long dsp_spl_jiffies; /* calculate the next time to fire */
41944 static u16 dsp_count; /* last sample count */
41945 static int dsp_count_valid; /* if we have last sample count */
41946
41947 -void
41948 +void __intentional_overflow(-1)
41949 dsp_cmx_send(void *arg)
41950 {
41951 struct dsp_conf *conf;
41952 diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
41953 index d93e245..e7ece6b 100644
41954 --- a/drivers/leds/leds-clevo-mail.c
41955 +++ b/drivers/leds/leds-clevo-mail.c
41956 @@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
41957 * detected as working, but in reality it is not) as low as
41958 * possible.
41959 */
41960 -static struct dmi_system_id clevo_mail_led_dmi_table[] __initdata = {
41961 +static struct dmi_system_id clevo_mail_led_dmi_table[] __initconst = {
41962 {
41963 .callback = clevo_mail_led_dmi_callback,
41964 .ident = "Clevo D410J",
41965 diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
41966 index 5b8f938..b73d657 100644
41967 --- a/drivers/leds/leds-ss4200.c
41968 +++ b/drivers/leds/leds-ss4200.c
41969 @@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
41970 * detected as working, but in reality it is not) as low as
41971 * possible.
41972 */
41973 -static struct dmi_system_id nas_led_whitelist[] __initdata = {
41974 +static struct dmi_system_id nas_led_whitelist[] __initconst = {
41975 {
41976 .callback = ss4200_led_dmi_callback,
41977 .ident = "Intel SS4200-E",
41978 diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
41979 index 0bf1e4e..b4bf44e 100644
41980 --- a/drivers/lguest/core.c
41981 +++ b/drivers/lguest/core.c
41982 @@ -97,9 +97,17 @@ static __init int map_switcher(void)
41983 * The end address needs +1 because __get_vm_area allocates an
41984 * extra guard page, so we need space for that.
41985 */
41986 +
41987 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
41988 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
41989 + VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
41990 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
41991 +#else
41992 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
41993 VM_ALLOC, switcher_addr, switcher_addr
41994 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
41995 +#endif
41996 +
41997 if (!switcher_vma) {
41998 err = -ENOMEM;
41999 printk("lguest: could not map switcher pages high\n");
42000 @@ -124,7 +132,7 @@ static __init int map_switcher(void)
42001 * Now the Switcher is mapped at the right address, we can't fail!
42002 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
42003 */
42004 - memcpy(switcher_vma->addr, start_switcher_text,
42005 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
42006 end_switcher_text - start_switcher_text);
42007
42008 printk(KERN_INFO "lguest: mapped switcher at %p\n",
42009 diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
42010 index bfb39bb..08a603b 100644
42011 --- a/drivers/lguest/page_tables.c
42012 +++ b/drivers/lguest/page_tables.c
42013 @@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
42014 /*:*/
42015
42016 #ifdef CONFIG_X86_PAE
42017 -static void release_pmd(pmd_t *spmd)
42018 +static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
42019 {
42020 /* If the entry's not present, there's nothing to release. */
42021 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
42022 diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
42023 index 5169239..47cb4db 100644
42024 --- a/drivers/lguest/x86/core.c
42025 +++ b/drivers/lguest/x86/core.c
42026 @@ -59,7 +59,7 @@ static struct {
42027 /* Offset from where switcher.S was compiled to where we've copied it */
42028 static unsigned long switcher_offset(void)
42029 {
42030 - return switcher_addr - (unsigned long)start_switcher_text;
42031 + return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
42032 }
42033
42034 /* This cpu's struct lguest_pages (after the Switcher text page) */
42035 @@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
42036 * These copies are pretty cheap, so we do them unconditionally: */
42037 /* Save the current Host top-level page directory.
42038 */
42039 +
42040 +#ifdef CONFIG_PAX_PER_CPU_PGD
42041 + pages->state.host_cr3 = read_cr3();
42042 +#else
42043 pages->state.host_cr3 = __pa(current->mm->pgd);
42044 +#endif
42045 +
42046 /*
42047 * Set up the Guest's page tables to see this CPU's pages (and no
42048 * other CPU's pages).
42049 @@ -475,7 +481,7 @@ void __init lguest_arch_host_init(void)
42050 * compiled-in switcher code and the high-mapped copy we just made.
42051 */
42052 for (i = 0; i < IDT_ENTRIES; i++)
42053 - default_idt_entries[i] += switcher_offset();
42054 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
42055
42056 /*
42057 * Set up the Switcher's per-cpu areas.
42058 @@ -558,7 +564,7 @@ void __init lguest_arch_host_init(void)
42059 * it will be undisturbed when we switch. To change %cs and jump we
42060 * need this structure to feed to Intel's "lcall" instruction.
42061 */
42062 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
42063 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
42064 lguest_entry.segment = LGUEST_CS;
42065
42066 /*
42067 diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
42068 index 40634b0..4f5855e 100644
42069 --- a/drivers/lguest/x86/switcher_32.S
42070 +++ b/drivers/lguest/x86/switcher_32.S
42071 @@ -87,6 +87,7 @@
42072 #include <asm/page.h>
42073 #include <asm/segment.h>
42074 #include <asm/lguest.h>
42075 +#include <asm/processor-flags.h>
42076
42077 // We mark the start of the code to copy
42078 // It's placed in .text tho it's never run here
42079 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
42080 // Changes type when we load it: damn Intel!
42081 // For after we switch over our page tables
42082 // That entry will be read-only: we'd crash.
42083 +
42084 +#ifdef CONFIG_PAX_KERNEXEC
42085 + mov %cr0, %edx
42086 + xor $X86_CR0_WP, %edx
42087 + mov %edx, %cr0
42088 +#endif
42089 +
42090 movl $(GDT_ENTRY_TSS*8), %edx
42091 ltr %dx
42092
42093 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
42094 // Let's clear it again for our return.
42095 // The GDT descriptor of the Host
42096 // Points to the table after two "size" bytes
42097 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
42098 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
42099 // Clear "used" from type field (byte 5, bit 2)
42100 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
42101 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
42102 +
42103 +#ifdef CONFIG_PAX_KERNEXEC
42104 + mov %cr0, %eax
42105 + xor $X86_CR0_WP, %eax
42106 + mov %eax, %cr0
42107 +#endif
42108
42109 // Once our page table's switched, the Guest is live!
42110 // The Host fades as we run this final step.
42111 @@ -295,13 +309,12 @@ deliver_to_host:
42112 // I consulted gcc, and it gave
42113 // These instructions, which I gladly credit:
42114 leal (%edx,%ebx,8), %eax
42115 - movzwl (%eax),%edx
42116 - movl 4(%eax), %eax
42117 - xorw %ax, %ax
42118 - orl %eax, %edx
42119 + movl 4(%eax), %edx
42120 + movw (%eax), %dx
42121 // Now the address of the handler's in %edx
42122 // We call it now: its "iret" drops us home.
42123 - jmp *%edx
42124 + ljmp $__KERNEL_CS, $1f
42125 +1: jmp *%edx
42126
42127 // Every interrupt can come to us here
42128 // But we must truly tell each apart.
42129 diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
42130 index 0003992..854bbce 100644
42131 --- a/drivers/md/bcache/closure.h
42132 +++ b/drivers/md/bcache/closure.h
42133 @@ -622,7 +622,7 @@ static inline void closure_wake_up(struct closure_waitlist *list)
42134 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
42135 struct workqueue_struct *wq)
42136 {
42137 - BUG_ON(object_is_on_stack(cl));
42138 + BUG_ON(object_starts_on_stack(cl));
42139 closure_set_ip(cl);
42140 cl->fn = fn;
42141 cl->wq = wq;
42142 diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
42143 index 547c4c5..5be1de4 100644
42144 --- a/drivers/md/bcache/super.c
42145 +++ b/drivers/md/bcache/super.c
42146 @@ -1644,7 +1644,7 @@ err_unlock_gc:
42147 err:
42148 closure_sync(&op.cl);
42149 /* XXX: test this, it's broken */
42150 - bch_cache_set_error(c, err);
42151 + bch_cache_set_error(c, "%s", err);
42152 }
42153
42154 static bool can_attach_cache(struct cache *ca, struct cache_set *c)
42155 diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
42156 index a7fd821..9dcf6c3 100644
42157 --- a/drivers/md/bitmap.c
42158 +++ b/drivers/md/bitmap.c
42159 @@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
42160 chunk_kb ? "KB" : "B");
42161 if (bitmap->storage.file) {
42162 seq_printf(seq, ", file: ");
42163 - seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
42164 + seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
42165 }
42166
42167 seq_printf(seq, "\n");
42168 diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
42169 index afe0814..8cf3794 100644
42170 --- a/drivers/md/dm-ioctl.c
42171 +++ b/drivers/md/dm-ioctl.c
42172 @@ -1745,7 +1745,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
42173 cmd == DM_LIST_VERSIONS_CMD)
42174 return 0;
42175
42176 - if ((cmd == DM_DEV_CREATE_CMD)) {
42177 + if (cmd == DM_DEV_CREATE_CMD) {
42178 if (!*param->name) {
42179 DMWARN("name not supplied when creating device");
42180 return -EINVAL;
42181 diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
42182 index 9584443..9fc9ac9 100644
42183 --- a/drivers/md/dm-raid1.c
42184 +++ b/drivers/md/dm-raid1.c
42185 @@ -40,7 +40,7 @@ enum dm_raid1_error {
42186
42187 struct mirror {
42188 struct mirror_set *ms;
42189 - atomic_t error_count;
42190 + atomic_unchecked_t error_count;
42191 unsigned long error_type;
42192 struct dm_dev *dev;
42193 sector_t offset;
42194 @@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
42195 struct mirror *m;
42196
42197 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
42198 - if (!atomic_read(&m->error_count))
42199 + if (!atomic_read_unchecked(&m->error_count))
42200 return m;
42201
42202 return NULL;
42203 @@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
42204 * simple way to tell if a device has encountered
42205 * errors.
42206 */
42207 - atomic_inc(&m->error_count);
42208 + atomic_inc_unchecked(&m->error_count);
42209
42210 if (test_and_set_bit(error_type, &m->error_type))
42211 return;
42212 @@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
42213 struct mirror *m = get_default_mirror(ms);
42214
42215 do {
42216 - if (likely(!atomic_read(&m->error_count)))
42217 + if (likely(!atomic_read_unchecked(&m->error_count)))
42218 return m;
42219
42220 if (m-- == ms->mirror)
42221 @@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
42222 {
42223 struct mirror *default_mirror = get_default_mirror(m->ms);
42224
42225 - return !atomic_read(&default_mirror->error_count);
42226 + return !atomic_read_unchecked(&default_mirror->error_count);
42227 }
42228
42229 static int mirror_available(struct mirror_set *ms, struct bio *bio)
42230 @@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
42231 */
42232 if (likely(region_in_sync(ms, region, 1)))
42233 m = choose_mirror(ms, bio->bi_sector);
42234 - else if (m && atomic_read(&m->error_count))
42235 + else if (m && atomic_read_unchecked(&m->error_count))
42236 m = NULL;
42237
42238 if (likely(m))
42239 @@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
42240 }
42241
42242 ms->mirror[mirror].ms = ms;
42243 - atomic_set(&(ms->mirror[mirror].error_count), 0);
42244 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
42245 ms->mirror[mirror].error_type = 0;
42246 ms->mirror[mirror].offset = offset;
42247
42248 @@ -1339,7 +1339,7 @@ static void mirror_resume(struct dm_target *ti)
42249 */
42250 static char device_status_char(struct mirror *m)
42251 {
42252 - if (!atomic_read(&(m->error_count)))
42253 + if (!atomic_read_unchecked(&(m->error_count)))
42254 return 'A';
42255
42256 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
42257 diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
42258 index 28a9012..9c0f6a5 100644
42259 --- a/drivers/md/dm-stats.c
42260 +++ b/drivers/md/dm-stats.c
42261 @@ -382,7 +382,7 @@ do_sync_free:
42262 synchronize_rcu_expedited();
42263 dm_stat_free(&s->rcu_head);
42264 } else {
42265 - ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1;
42266 + ACCESS_ONCE_RW(dm_stat_need_rcu_barrier) = 1;
42267 call_rcu(&s->rcu_head, dm_stat_free);
42268 }
42269 return 0;
42270 @@ -554,8 +554,8 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
42271 ((bi_rw & (REQ_WRITE | REQ_DISCARD)) ==
42272 (ACCESS_ONCE(last->last_rw) & (REQ_WRITE | REQ_DISCARD)))
42273 ));
42274 - ACCESS_ONCE(last->last_sector) = end_sector;
42275 - ACCESS_ONCE(last->last_rw) = bi_rw;
42276 + ACCESS_ONCE_RW(last->last_sector) = end_sector;
42277 + ACCESS_ONCE_RW(last->last_rw) = bi_rw;
42278 }
42279
42280 rcu_read_lock();
42281 diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
42282 index 73c1712..7347292 100644
42283 --- a/drivers/md/dm-stripe.c
42284 +++ b/drivers/md/dm-stripe.c
42285 @@ -21,7 +21,7 @@ struct stripe {
42286 struct dm_dev *dev;
42287 sector_t physical_start;
42288
42289 - atomic_t error_count;
42290 + atomic_unchecked_t error_count;
42291 };
42292
42293 struct stripe_c {
42294 @@ -186,7 +186,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
42295 kfree(sc);
42296 return r;
42297 }
42298 - atomic_set(&(sc->stripe[i].error_count), 0);
42299 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
42300 }
42301
42302 ti->private = sc;
42303 @@ -327,7 +327,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
42304 DMEMIT("%d ", sc->stripes);
42305 for (i = 0; i < sc->stripes; i++) {
42306 DMEMIT("%s ", sc->stripe[i].dev->name);
42307 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
42308 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
42309 'D' : 'A';
42310 }
42311 buffer[i] = '\0';
42312 @@ -372,8 +372,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
42313 */
42314 for (i = 0; i < sc->stripes; i++)
42315 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
42316 - atomic_inc(&(sc->stripe[i].error_count));
42317 - if (atomic_read(&(sc->stripe[i].error_count)) <
42318 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
42319 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
42320 DM_IO_ERROR_THRESHOLD)
42321 schedule_work(&sc->trigger_event);
42322 }
42323 diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
42324 index 20a8cc0..5447b11 100644
42325 --- a/drivers/md/dm-table.c
42326 +++ b/drivers/md/dm-table.c
42327 @@ -291,7 +291,7 @@ static struct dm_dev_internal *find_device(struct list_head *l, dev_t dev)
42328 static int open_dev(struct dm_dev_internal *d, dev_t dev,
42329 struct mapped_device *md)
42330 {
42331 - static char *_claim_ptr = "I belong to device-mapper";
42332 + static char _claim_ptr[] = "I belong to device-mapper";
42333 struct block_device *bdev;
42334
42335 int r;
42336 @@ -359,7 +359,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
42337 if (!dev_size)
42338 return 0;
42339
42340 - if ((start >= dev_size) || (start + len > dev_size)) {
42341 + if ((start >= dev_size) || (len > dev_size - start)) {
42342 DMWARN("%s: %s too small for target: "
42343 "start=%llu, len=%llu, dev_size=%llu",
42344 dm_device_name(ti->table->md), bdevname(bdev, b),
42345 diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
42346 index 8a30ad5..72792d3 100644
42347 --- a/drivers/md/dm-thin-metadata.c
42348 +++ b/drivers/md/dm-thin-metadata.c
42349 @@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
42350 {
42351 pmd->info.tm = pmd->tm;
42352 pmd->info.levels = 2;
42353 - pmd->info.value_type.context = pmd->data_sm;
42354 + pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
42355 pmd->info.value_type.size = sizeof(__le64);
42356 pmd->info.value_type.inc = data_block_inc;
42357 pmd->info.value_type.dec = data_block_dec;
42358 @@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
42359
42360 pmd->bl_info.tm = pmd->tm;
42361 pmd->bl_info.levels = 1;
42362 - pmd->bl_info.value_type.context = pmd->data_sm;
42363 + pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
42364 pmd->bl_info.value_type.size = sizeof(__le64);
42365 pmd->bl_info.value_type.inc = data_block_inc;
42366 pmd->bl_info.value_type.dec = data_block_dec;
42367 diff --git a/drivers/md/dm.c b/drivers/md/dm.c
42368 index b3e26c7..1efca94 100644
42369 --- a/drivers/md/dm.c
42370 +++ b/drivers/md/dm.c
42371 @@ -179,9 +179,9 @@ struct mapped_device {
42372 /*
42373 * Event handling.
42374 */
42375 - atomic_t event_nr;
42376 + atomic_unchecked_t event_nr;
42377 wait_queue_head_t eventq;
42378 - atomic_t uevent_seq;
42379 + atomic_unchecked_t uevent_seq;
42380 struct list_head uevent_list;
42381 spinlock_t uevent_lock; /* Protect access to uevent_list */
42382
42383 @@ -1985,8 +1985,8 @@ static struct mapped_device *alloc_dev(int minor)
42384 spin_lock_init(&md->deferred_lock);
42385 atomic_set(&md->holders, 1);
42386 atomic_set(&md->open_count, 0);
42387 - atomic_set(&md->event_nr, 0);
42388 - atomic_set(&md->uevent_seq, 0);
42389 + atomic_set_unchecked(&md->event_nr, 0);
42390 + atomic_set_unchecked(&md->uevent_seq, 0);
42391 INIT_LIST_HEAD(&md->uevent_list);
42392 spin_lock_init(&md->uevent_lock);
42393
42394 @@ -2139,7 +2139,7 @@ static void event_callback(void *context)
42395
42396 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
42397
42398 - atomic_inc(&md->event_nr);
42399 + atomic_inc_unchecked(&md->event_nr);
42400 wake_up(&md->eventq);
42401 }
42402
42403 @@ -2832,18 +2832,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
42404
42405 uint32_t dm_next_uevent_seq(struct mapped_device *md)
42406 {
42407 - return atomic_add_return(1, &md->uevent_seq);
42408 + return atomic_add_return_unchecked(1, &md->uevent_seq);
42409 }
42410
42411 uint32_t dm_get_event_nr(struct mapped_device *md)
42412 {
42413 - return atomic_read(&md->event_nr);
42414 + return atomic_read_unchecked(&md->event_nr);
42415 }
42416
42417 int dm_wait_event(struct mapped_device *md, int event_nr)
42418 {
42419 return wait_event_interruptible(md->eventq,
42420 - (event_nr != atomic_read(&md->event_nr)));
42421 + (event_nr != atomic_read_unchecked(&md->event_nr)));
42422 }
42423
42424 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
42425 diff --git a/drivers/md/md.c b/drivers/md/md.c
42426 index ba46d97..f8f5019 100644
42427 --- a/drivers/md/md.c
42428 +++ b/drivers/md/md.c
42429 @@ -234,10 +234,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
42430 * start build, activate spare
42431 */
42432 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
42433 -static atomic_t md_event_count;
42434 +static atomic_unchecked_t md_event_count;
42435 void md_new_event(struct mddev *mddev)
42436 {
42437 - atomic_inc(&md_event_count);
42438 + atomic_inc_unchecked(&md_event_count);
42439 wake_up(&md_event_waiters);
42440 }
42441 EXPORT_SYMBOL_GPL(md_new_event);
42442 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
42443 */
42444 static void md_new_event_inintr(struct mddev *mddev)
42445 {
42446 - atomic_inc(&md_event_count);
42447 + atomic_inc_unchecked(&md_event_count);
42448 wake_up(&md_event_waiters);
42449 }
42450
42451 @@ -1502,7 +1502,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
42452 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
42453 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
42454 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
42455 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
42456 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
42457
42458 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
42459 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
42460 @@ -1746,7 +1746,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
42461 else
42462 sb->resync_offset = cpu_to_le64(0);
42463
42464 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
42465 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
42466
42467 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
42468 sb->size = cpu_to_le64(mddev->dev_sectors);
42469 @@ -2751,7 +2751,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
42470 static ssize_t
42471 errors_show(struct md_rdev *rdev, char *page)
42472 {
42473 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
42474 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
42475 }
42476
42477 static ssize_t
42478 @@ -2760,7 +2760,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
42479 char *e;
42480 unsigned long n = simple_strtoul(buf, &e, 10);
42481 if (*buf && (*e == 0 || *e == '\n')) {
42482 - atomic_set(&rdev->corrected_errors, n);
42483 + atomic_set_unchecked(&rdev->corrected_errors, n);
42484 return len;
42485 }
42486 return -EINVAL;
42487 @@ -3208,8 +3208,8 @@ int md_rdev_init(struct md_rdev *rdev)
42488 rdev->sb_loaded = 0;
42489 rdev->bb_page = NULL;
42490 atomic_set(&rdev->nr_pending, 0);
42491 - atomic_set(&rdev->read_errors, 0);
42492 - atomic_set(&rdev->corrected_errors, 0);
42493 + atomic_set_unchecked(&rdev->read_errors, 0);
42494 + atomic_set_unchecked(&rdev->corrected_errors, 0);
42495
42496 INIT_LIST_HEAD(&rdev->same_set);
42497 init_waitqueue_head(&rdev->blocked_wait);
42498 @@ -7043,7 +7043,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
42499
42500 spin_unlock(&pers_lock);
42501 seq_printf(seq, "\n");
42502 - seq->poll_event = atomic_read(&md_event_count);
42503 + seq->poll_event = atomic_read_unchecked(&md_event_count);
42504 return 0;
42505 }
42506 if (v == (void*)2) {
42507 @@ -7146,7 +7146,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
42508 return error;
42509
42510 seq = file->private_data;
42511 - seq->poll_event = atomic_read(&md_event_count);
42512 + seq->poll_event = atomic_read_unchecked(&md_event_count);
42513 return error;
42514 }
42515
42516 @@ -7160,7 +7160,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
42517 /* always allow read */
42518 mask = POLLIN | POLLRDNORM;
42519
42520 - if (seq->poll_event != atomic_read(&md_event_count))
42521 + if (seq->poll_event != atomic_read_unchecked(&md_event_count))
42522 mask |= POLLERR | POLLPRI;
42523 return mask;
42524 }
42525 @@ -7204,7 +7204,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
42526 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
42527 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
42528 (int)part_stat_read(&disk->part0, sectors[1]) -
42529 - atomic_read(&disk->sync_io);
42530 + atomic_read_unchecked(&disk->sync_io);
42531 /* sync IO will cause sync_io to increase before the disk_stats
42532 * as sync_io is counted when a request starts, and
42533 * disk_stats is counted when it completes.
42534 diff --git a/drivers/md/md.h b/drivers/md/md.h
42535 index 608050c..6e77db5d 100644
42536 --- a/drivers/md/md.h
42537 +++ b/drivers/md/md.h
42538 @@ -94,13 +94,13 @@ struct md_rdev {
42539 * only maintained for arrays that
42540 * support hot removal
42541 */
42542 - atomic_t read_errors; /* number of consecutive read errors that
42543 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
42544 * we have tried to ignore.
42545 */
42546 struct timespec last_read_error; /* monotonic time since our
42547 * last read error
42548 */
42549 - atomic_t corrected_errors; /* number of corrected read errors,
42550 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
42551 * for reporting to userspace and storing
42552 * in superblock.
42553 */
42554 @@ -446,7 +446,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
42555
42556 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
42557 {
42558 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
42559 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
42560 }
42561
42562 struct md_personality
42563 diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
42564 index 3e6d115..ffecdeb 100644
42565 --- a/drivers/md/persistent-data/dm-space-map.h
42566 +++ b/drivers/md/persistent-data/dm-space-map.h
42567 @@ -71,6 +71,7 @@ struct dm_space_map {
42568 dm_sm_threshold_fn fn,
42569 void *context);
42570 };
42571 +typedef struct dm_space_map __no_const dm_space_map_no_const;
42572
42573 /*----------------------------------------------------------------*/
42574
42575 diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
42576 index aacf6bf..67d63f2 100644
42577 --- a/drivers/md/raid1.c
42578 +++ b/drivers/md/raid1.c
42579 @@ -1824,7 +1824,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
42580 if (r1_sync_page_io(rdev, sect, s,
42581 bio->bi_io_vec[idx].bv_page,
42582 READ) != 0)
42583 - atomic_add(s, &rdev->corrected_errors);
42584 + atomic_add_unchecked(s, &rdev->corrected_errors);
42585 }
42586 sectors -= s;
42587 sect += s;
42588 @@ -2051,7 +2051,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
42589 test_bit(In_sync, &rdev->flags)) {
42590 if (r1_sync_page_io(rdev, sect, s,
42591 conf->tmppage, READ)) {
42592 - atomic_add(s, &rdev->corrected_errors);
42593 + atomic_add_unchecked(s, &rdev->corrected_errors);
42594 printk(KERN_INFO
42595 "md/raid1:%s: read error corrected "
42596 "(%d sectors at %llu on %s)\n",
42597 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
42598 index 73dc8a3..bdd515a 100644
42599 --- a/drivers/md/raid10.c
42600 +++ b/drivers/md/raid10.c
42601 @@ -1963,7 +1963,7 @@ static void end_sync_read(struct bio *bio, int error)
42602 /* The write handler will notice the lack of
42603 * R10BIO_Uptodate and record any errors etc
42604 */
42605 - atomic_add(r10_bio->sectors,
42606 + atomic_add_unchecked(r10_bio->sectors,
42607 &conf->mirrors[d].rdev->corrected_errors);
42608
42609 /* for reconstruct, we always reschedule after a read.
42610 @@ -2321,7 +2321,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
42611 {
42612 struct timespec cur_time_mon;
42613 unsigned long hours_since_last;
42614 - unsigned int read_errors = atomic_read(&rdev->read_errors);
42615 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
42616
42617 ktime_get_ts(&cur_time_mon);
42618
42619 @@ -2343,9 +2343,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
42620 * overflowing the shift of read_errors by hours_since_last.
42621 */
42622 if (hours_since_last >= 8 * sizeof(read_errors))
42623 - atomic_set(&rdev->read_errors, 0);
42624 + atomic_set_unchecked(&rdev->read_errors, 0);
42625 else
42626 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
42627 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
42628 }
42629
42630 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
42631 @@ -2399,8 +2399,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
42632 return;
42633
42634 check_decay_read_errors(mddev, rdev);
42635 - atomic_inc(&rdev->read_errors);
42636 - if (atomic_read(&rdev->read_errors) > max_read_errors) {
42637 + atomic_inc_unchecked(&rdev->read_errors);
42638 + if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
42639 char b[BDEVNAME_SIZE];
42640 bdevname(rdev->bdev, b);
42641
42642 @@ -2408,7 +2408,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
42643 "md/raid10:%s: %s: Raid device exceeded "
42644 "read_error threshold [cur %d:max %d]\n",
42645 mdname(mddev), b,
42646 - atomic_read(&rdev->read_errors), max_read_errors);
42647 + atomic_read_unchecked(&rdev->read_errors), max_read_errors);
42648 printk(KERN_NOTICE
42649 "md/raid10:%s: %s: Failing raid device\n",
42650 mdname(mddev), b);
42651 @@ -2563,7 +2563,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
42652 sect +
42653 choose_data_offset(r10_bio, rdev)),
42654 bdevname(rdev->bdev, b));
42655 - atomic_add(s, &rdev->corrected_errors);
42656 + atomic_add_unchecked(s, &rdev->corrected_errors);
42657 }
42658
42659 rdev_dec_pending(rdev, mddev);
42660 diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
42661 index 8a0665d..984c46d 100644
42662 --- a/drivers/md/raid5.c
42663 +++ b/drivers/md/raid5.c
42664 @@ -1887,21 +1887,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
42665 mdname(conf->mddev), STRIPE_SECTORS,
42666 (unsigned long long)s,
42667 bdevname(rdev->bdev, b));
42668 - atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
42669 + atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
42670 clear_bit(R5_ReadError, &sh->dev[i].flags);
42671 clear_bit(R5_ReWrite, &sh->dev[i].flags);
42672 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
42673 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
42674
42675 - if (atomic_read(&rdev->read_errors))
42676 - atomic_set(&rdev->read_errors, 0);
42677 + if (atomic_read_unchecked(&rdev->read_errors))
42678 + atomic_set_unchecked(&rdev->read_errors, 0);
42679 } else {
42680 const char *bdn = bdevname(rdev->bdev, b);
42681 int retry = 0;
42682 int set_bad = 0;
42683
42684 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
42685 - atomic_inc(&rdev->read_errors);
42686 + atomic_inc_unchecked(&rdev->read_errors);
42687 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
42688 printk_ratelimited(
42689 KERN_WARNING
42690 @@ -1929,7 +1929,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
42691 mdname(conf->mddev),
42692 (unsigned long long)s,
42693 bdn);
42694 - } else if (atomic_read(&rdev->read_errors)
42695 + } else if (atomic_read_unchecked(&rdev->read_errors)
42696 > conf->max_nr_stripes)
42697 printk(KERN_WARNING
42698 "md/raid:%s: Too many read errors, failing device %s.\n",
42699 diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
42700 index 401ef64..836e563 100644
42701 --- a/drivers/media/dvb-core/dvbdev.c
42702 +++ b/drivers/media/dvb-core/dvbdev.c
42703 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
42704 const struct dvb_device *template, void *priv, int type)
42705 {
42706 struct dvb_device *dvbdev;
42707 - struct file_operations *dvbdevfops;
42708 + file_operations_no_const *dvbdevfops;
42709 struct device *clsdev;
42710 int minor;
42711 int id;
42712 diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
42713 index 9b6c3bb..baeb5c7 100644
42714 --- a/drivers/media/dvb-frontends/dib3000.h
42715 +++ b/drivers/media/dvb-frontends/dib3000.h
42716 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
42717 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
42718 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
42719 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
42720 -};
42721 +} __no_const;
42722
42723 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
42724 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
42725 diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
42726 index ecf21d9..b992428d 100644
42727 --- a/drivers/media/pci/cx88/cx88-video.c
42728 +++ b/drivers/media/pci/cx88/cx88-video.c
42729 @@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
42730
42731 /* ------------------------------------------------------------------ */
42732
42733 -static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42734 -static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42735 -static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42736 +static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42737 +static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42738 +static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
42739
42740 module_param_array(video_nr, int, NULL, 0444);
42741 module_param_array(vbi_nr, int, NULL, 0444);
42742 diff --git a/drivers/media/pci/ivtv/ivtv-driver.c b/drivers/media/pci/ivtv/ivtv-driver.c
42743 index c08ae3e..eb59af1 100644
42744 --- a/drivers/media/pci/ivtv/ivtv-driver.c
42745 +++ b/drivers/media/pci/ivtv/ivtv-driver.c
42746 @@ -83,7 +83,7 @@ static struct pci_device_id ivtv_pci_tbl[] = {
42747 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
42748
42749 /* ivtv instance counter */
42750 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
42751 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
42752
42753 /* Parameter declarations */
42754 static int cardtype[IVTV_MAX_CARDS];
42755 diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
42756 index dfd0a21..6bbb465 100644
42757 --- a/drivers/media/platform/omap/omap_vout.c
42758 +++ b/drivers/media/platform/omap/omap_vout.c
42759 @@ -63,7 +63,6 @@ enum omap_vout_channels {
42760 OMAP_VIDEO2,
42761 };
42762
42763 -static struct videobuf_queue_ops video_vbq_ops;
42764 /* Variables configurable through module params*/
42765 static u32 video1_numbuffers = 3;
42766 static u32 video2_numbuffers = 3;
42767 @@ -1014,6 +1013,12 @@ static int omap_vout_open(struct file *file)
42768 {
42769 struct videobuf_queue *q;
42770 struct omap_vout_device *vout = NULL;
42771 + static struct videobuf_queue_ops video_vbq_ops = {
42772 + .buf_setup = omap_vout_buffer_setup,
42773 + .buf_prepare = omap_vout_buffer_prepare,
42774 + .buf_release = omap_vout_buffer_release,
42775 + .buf_queue = omap_vout_buffer_queue,
42776 + };
42777
42778 vout = video_drvdata(file);
42779 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
42780 @@ -1031,10 +1036,6 @@ static int omap_vout_open(struct file *file)
42781 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
42782
42783 q = &vout->vbq;
42784 - video_vbq_ops.buf_setup = omap_vout_buffer_setup;
42785 - video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
42786 - video_vbq_ops.buf_release = omap_vout_buffer_release;
42787 - video_vbq_ops.buf_queue = omap_vout_buffer_queue;
42788 spin_lock_init(&vout->vbq_lock);
42789
42790 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
42791 diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
42792 index 04e6490..2df65bf 100644
42793 --- a/drivers/media/platform/s5p-tv/mixer.h
42794 +++ b/drivers/media/platform/s5p-tv/mixer.h
42795 @@ -156,7 +156,7 @@ struct mxr_layer {
42796 /** layer index (unique identifier) */
42797 int idx;
42798 /** callbacks for layer methods */
42799 - struct mxr_layer_ops ops;
42800 + struct mxr_layer_ops *ops;
42801 /** format array */
42802 const struct mxr_format **fmt_array;
42803 /** size of format array */
42804 diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
42805 index b93a21f..2535195 100644
42806 --- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
42807 +++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
42808 @@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
42809 {
42810 struct mxr_layer *layer;
42811 int ret;
42812 - struct mxr_layer_ops ops = {
42813 + static struct mxr_layer_ops ops = {
42814 .release = mxr_graph_layer_release,
42815 .buffer_set = mxr_graph_buffer_set,
42816 .stream_set = mxr_graph_stream_set,
42817 diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
42818 index b713403..53cb5ad 100644
42819 --- a/drivers/media/platform/s5p-tv/mixer_reg.c
42820 +++ b/drivers/media/platform/s5p-tv/mixer_reg.c
42821 @@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
42822 layer->update_buf = next;
42823 }
42824
42825 - layer->ops.buffer_set(layer, layer->update_buf);
42826 + layer->ops->buffer_set(layer, layer->update_buf);
42827
42828 if (done && done != layer->shadow_buf)
42829 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
42830 diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
42831 index 641b1f0..49cff30 100644
42832 --- a/drivers/media/platform/s5p-tv/mixer_video.c
42833 +++ b/drivers/media/platform/s5p-tv/mixer_video.c
42834 @@ -210,7 +210,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
42835 layer->geo.src.height = layer->geo.src.full_height;
42836
42837 mxr_geometry_dump(mdev, &layer->geo);
42838 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42839 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42840 mxr_geometry_dump(mdev, &layer->geo);
42841 }
42842
42843 @@ -228,7 +228,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
42844 layer->geo.dst.full_width = mbus_fmt.width;
42845 layer->geo.dst.full_height = mbus_fmt.height;
42846 layer->geo.dst.field = mbus_fmt.field;
42847 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42848 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
42849
42850 mxr_geometry_dump(mdev, &layer->geo);
42851 }
42852 @@ -334,7 +334,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
42853 /* set source size to highest accepted value */
42854 geo->src.full_width = max(geo->dst.full_width, pix->width);
42855 geo->src.full_height = max(geo->dst.full_height, pix->height);
42856 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42857 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42858 mxr_geometry_dump(mdev, &layer->geo);
42859 /* set cropping to total visible screen */
42860 geo->src.width = pix->width;
42861 @@ -342,12 +342,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
42862 geo->src.x_offset = 0;
42863 geo->src.y_offset = 0;
42864 /* assure consistency of geometry */
42865 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
42866 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
42867 mxr_geometry_dump(mdev, &layer->geo);
42868 /* set full size to lowest possible value */
42869 geo->src.full_width = 0;
42870 geo->src.full_height = 0;
42871 - layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42872 + layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
42873 mxr_geometry_dump(mdev, &layer->geo);
42874
42875 /* returning results */
42876 @@ -474,7 +474,7 @@ static int mxr_s_selection(struct file *file, void *fh,
42877 target->width = s->r.width;
42878 target->height = s->r.height;
42879
42880 - layer->ops.fix_geometry(layer, stage, s->flags);
42881 + layer->ops->fix_geometry(layer, stage, s->flags);
42882
42883 /* retrieve update selection rectangle */
42884 res.left = target->x_offset;
42885 @@ -955,13 +955,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
42886 mxr_output_get(mdev);
42887
42888 mxr_layer_update_output(layer);
42889 - layer->ops.format_set(layer);
42890 + layer->ops->format_set(layer);
42891 /* enabling layer in hardware */
42892 spin_lock_irqsave(&layer->enq_slock, flags);
42893 layer->state = MXR_LAYER_STREAMING;
42894 spin_unlock_irqrestore(&layer->enq_slock, flags);
42895
42896 - layer->ops.stream_set(layer, MXR_ENABLE);
42897 + layer->ops->stream_set(layer, MXR_ENABLE);
42898 mxr_streamer_get(mdev);
42899
42900 return 0;
42901 @@ -1031,7 +1031,7 @@ static int stop_streaming(struct vb2_queue *vq)
42902 spin_unlock_irqrestore(&layer->enq_slock, flags);
42903
42904 /* disabling layer in hardware */
42905 - layer->ops.stream_set(layer, MXR_DISABLE);
42906 + layer->ops->stream_set(layer, MXR_DISABLE);
42907 /* remove one streamer */
42908 mxr_streamer_put(mdev);
42909 /* allow changes in output configuration */
42910 @@ -1070,8 +1070,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
42911
42912 void mxr_layer_release(struct mxr_layer *layer)
42913 {
42914 - if (layer->ops.release)
42915 - layer->ops.release(layer);
42916 + if (layer->ops->release)
42917 + layer->ops->release(layer);
42918 }
42919
42920 void mxr_base_layer_release(struct mxr_layer *layer)
42921 @@ -1097,7 +1097,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
42922
42923 layer->mdev = mdev;
42924 layer->idx = idx;
42925 - layer->ops = *ops;
42926 + layer->ops = ops;
42927
42928 spin_lock_init(&layer->enq_slock);
42929 INIT_LIST_HEAD(&layer->enq_list);
42930 diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
42931 index 3d13a63..da31bf1 100644
42932 --- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
42933 +++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
42934 @@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
42935 {
42936 struct mxr_layer *layer;
42937 int ret;
42938 - struct mxr_layer_ops ops = {
42939 + static struct mxr_layer_ops ops = {
42940 .release = mxr_vp_layer_release,
42941 .buffer_set = mxr_vp_buffer_set,
42942 .stream_set = mxr_vp_stream_set,
42943 diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
42944 index 545c04c..a14bded 100644
42945 --- a/drivers/media/radio/radio-cadet.c
42946 +++ b/drivers/media/radio/radio-cadet.c
42947 @@ -324,6 +324,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
42948 unsigned char readbuf[RDS_BUFFER];
42949 int i = 0;
42950
42951 + if (count > RDS_BUFFER)
42952 + return -EFAULT;
42953 mutex_lock(&dev->lock);
42954 if (dev->rdsstat == 0)
42955 cadet_start_rds(dev);
42956 @@ -339,7 +341,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
42957 while (i < count && dev->rdsin != dev->rdsout)
42958 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
42959
42960 - if (i && copy_to_user(data, readbuf, i))
42961 + if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
42962 i = -EFAULT;
42963 unlock:
42964 mutex_unlock(&dev->lock);
42965 diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c
42966 index 5236035..c622c74 100644
42967 --- a/drivers/media/radio/radio-maxiradio.c
42968 +++ b/drivers/media/radio/radio-maxiradio.c
42969 @@ -61,7 +61,7 @@ MODULE_PARM_DESC(radio_nr, "Radio device number");
42970 /* TEA5757 pin mappings */
42971 static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16;
42972
42973 -static atomic_t maxiradio_instance = ATOMIC_INIT(0);
42974 +static atomic_unchecked_t maxiradio_instance = ATOMIC_INIT(0);
42975
42976 #define PCI_VENDOR_ID_GUILLEMOT 0x5046
42977 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001
42978 diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
42979 index b914772..7ddbf9b 100644
42980 --- a/drivers/media/radio/radio-shark.c
42981 +++ b/drivers/media/radio/radio-shark.c
42982 @@ -79,7 +79,7 @@ struct shark_device {
42983 u32 last_val;
42984 };
42985
42986 -static atomic_t shark_instance = ATOMIC_INIT(0);
42987 +static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
42988
42989 static void shark_write_val(struct snd_tea575x *tea, u32 val)
42990 {
42991 diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
42992 index 9fb6697..f167415 100644
42993 --- a/drivers/media/radio/radio-shark2.c
42994 +++ b/drivers/media/radio/radio-shark2.c
42995 @@ -74,7 +74,7 @@ struct shark_device {
42996 u8 *transfer_buffer;
42997 };
42998
42999 -static atomic_t shark_instance = ATOMIC_INIT(0);
43000 +static atomic_unchecked_t shark_instance = ATOMIC_INIT(0);
43001
43002 static int shark_write_reg(struct radio_tea5777 *tea, u64 reg)
43003 {
43004 diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
43005 index 9c9084c..a9e8dfb 100644
43006 --- a/drivers/media/radio/radio-si476x.c
43007 +++ b/drivers/media/radio/radio-si476x.c
43008 @@ -1445,7 +1445,7 @@ static int si476x_radio_probe(struct platform_device *pdev)
43009 struct si476x_radio *radio;
43010 struct v4l2_ctrl *ctrl;
43011
43012 - static atomic_t instance = ATOMIC_INIT(0);
43013 + static atomic_unchecked_t instance = ATOMIC_INIT(0);
43014
43015 radio = devm_kzalloc(&pdev->dev, sizeof(*radio), GFP_KERNEL);
43016 if (!radio)
43017 diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
43018 index 46da365..3ba4206 100644
43019 --- a/drivers/media/rc/rc-main.c
43020 +++ b/drivers/media/rc/rc-main.c
43021 @@ -1065,7 +1065,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
43022 int rc_register_device(struct rc_dev *dev)
43023 {
43024 static bool raw_init = false; /* raw decoders loaded? */
43025 - static atomic_t devno = ATOMIC_INIT(0);
43026 + static atomic_unchecked_t devno = ATOMIC_INIT(0);
43027 struct rc_map *rc_map;
43028 const char *path;
43029 int rc;
43030 @@ -1096,7 +1096,7 @@ int rc_register_device(struct rc_dev *dev)
43031 */
43032 mutex_lock(&dev->lock);
43033
43034 - dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
43035 + dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
43036 dev_set_name(&dev->dev, "rc%ld", dev->devno);
43037 dev_set_drvdata(&dev->dev, dev);
43038 rc = device_add(&dev->dev);
43039 diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
43040 index 20e345d..da56fe4 100644
43041 --- a/drivers/media/usb/dvb-usb/cxusb.c
43042 +++ b/drivers/media/usb/dvb-usb/cxusb.c
43043 @@ -1101,7 +1101,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
43044
43045 struct dib0700_adapter_state {
43046 int (*set_param_save) (struct dvb_frontend *);
43047 -};
43048 +} __no_const;
43049
43050 static int dib7070_set_param_override(struct dvb_frontend *fe)
43051 {
43052 diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
43053 index 71b22f5..a63b33f 100644
43054 --- a/drivers/media/usb/dvb-usb/dw2102.c
43055 +++ b/drivers/media/usb/dvb-usb/dw2102.c
43056 @@ -121,7 +121,7 @@ struct su3000_state {
43057
43058 struct s6x0_state {
43059 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
43060 -};
43061 +} __no_const;
43062
43063 /* debug */
43064 static int dvb_usb_dw2102_debug;
43065 diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
43066 index 8f7a6a4..59502dd 100644
43067 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
43068 +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
43069 @@ -326,7 +326,7 @@ struct v4l2_buffer32 {
43070 __u32 reserved;
43071 };
43072
43073 -static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
43074 +static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
43075 enum v4l2_memory memory)
43076 {
43077 void __user *up_pln;
43078 @@ -355,7 +355,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
43079 return 0;
43080 }
43081
43082 -static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
43083 +static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
43084 enum v4l2_memory memory)
43085 {
43086 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
43087 @@ -772,7 +772,7 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
43088 put_user(kp->start_block, &up->start_block) ||
43089 put_user(kp->blocks, &up->blocks) ||
43090 put_user(tmp, &up->edid) ||
43091 - copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
43092 + copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
43093 return -EFAULT;
43094 return 0;
43095 }
43096 diff --git a/drivers/media/v4l2-core/v4l2-device.c b/drivers/media/v4l2-core/v4l2-device.c
43097 index 02d1b63..5fd6b16 100644
43098 --- a/drivers/media/v4l2-core/v4l2-device.c
43099 +++ b/drivers/media/v4l2-core/v4l2-device.c
43100 @@ -75,9 +75,9 @@ int v4l2_device_put(struct v4l2_device *v4l2_dev)
43101 EXPORT_SYMBOL_GPL(v4l2_device_put);
43102
43103 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
43104 - atomic_t *instance)
43105 + atomic_unchecked_t *instance)
43106 {
43107 - int num = atomic_inc_return(instance) - 1;
43108 + int num = atomic_inc_return_unchecked(instance) - 1;
43109 int len = strlen(basename);
43110
43111 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
43112 diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
43113 index 68e6b5e..d8b923e 100644
43114 --- a/drivers/media/v4l2-core/v4l2-ioctl.c
43115 +++ b/drivers/media/v4l2-core/v4l2-ioctl.c
43116 @@ -1939,7 +1939,8 @@ struct v4l2_ioctl_info {
43117 struct file *file, void *fh, void *p);
43118 } u;
43119 void (*debug)(const void *arg, bool write_only);
43120 -};
43121 +} __do_const;
43122 +typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
43123
43124 /* This control needs a priority check */
43125 #define INFO_FL_PRIO (1 << 0)
43126 @@ -2120,7 +2121,7 @@ static long __video_do_ioctl(struct file *file,
43127 struct video_device *vfd = video_devdata(file);
43128 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
43129 bool write_only = false;
43130 - struct v4l2_ioctl_info default_info;
43131 + v4l2_ioctl_info_no_const default_info;
43132 const struct v4l2_ioctl_info *info;
43133 void *fh = file->private_data;
43134 struct v4l2_fh *vfh = NULL;
43135 @@ -2194,7 +2195,7 @@ done:
43136 }
43137
43138 static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
43139 - void * __user *user_ptr, void ***kernel_ptr)
43140 + void __user **user_ptr, void ***kernel_ptr)
43141 {
43142 int ret = 0;
43143
43144 @@ -2210,7 +2211,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
43145 ret = -EINVAL;
43146 break;
43147 }
43148 - *user_ptr = (void __user *)buf->m.planes;
43149 + *user_ptr = (void __force_user *)buf->m.planes;
43150 *kernel_ptr = (void *)&buf->m.planes;
43151 *array_size = sizeof(struct v4l2_plane) * buf->length;
43152 ret = 1;
43153 @@ -2245,7 +2246,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
43154 ret = -EINVAL;
43155 break;
43156 }
43157 - *user_ptr = (void __user *)ctrls->controls;
43158 + *user_ptr = (void __force_user *)ctrls->controls;
43159 *kernel_ptr = (void *)&ctrls->controls;
43160 *array_size = sizeof(struct v4l2_ext_control)
43161 * ctrls->count;
43162 diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
43163 index 767ff4d..c69d259 100644
43164 --- a/drivers/message/fusion/mptbase.c
43165 +++ b/drivers/message/fusion/mptbase.c
43166 @@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
43167 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
43168 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
43169
43170 +#ifdef CONFIG_GRKERNSEC_HIDESYM
43171 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
43172 +#else
43173 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
43174 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
43175 +#endif
43176 +
43177 /*
43178 * Rounding UP to nearest 4-kB boundary here...
43179 */
43180 @@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
43181 ioc->facts.GlobalCredits);
43182
43183 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
43184 +#ifdef CONFIG_GRKERNSEC_HIDESYM
43185 + NULL, NULL);
43186 +#else
43187 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
43188 +#endif
43189 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
43190 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
43191 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
43192 diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
43193 index dd239bd..689c4f7 100644
43194 --- a/drivers/message/fusion/mptsas.c
43195 +++ b/drivers/message/fusion/mptsas.c
43196 @@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
43197 return 0;
43198 }
43199
43200 +static inline void
43201 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
43202 +{
43203 + if (phy_info->port_details) {
43204 + phy_info->port_details->rphy = rphy;
43205 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
43206 + ioc->name, rphy));
43207 + }
43208 +
43209 + if (rphy) {
43210 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
43211 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
43212 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
43213 + ioc->name, rphy, rphy->dev.release));
43214 + }
43215 +}
43216 +
43217 /* no mutex */
43218 static void
43219 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
43220 @@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
43221 return NULL;
43222 }
43223
43224 -static inline void
43225 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
43226 -{
43227 - if (phy_info->port_details) {
43228 - phy_info->port_details->rphy = rphy;
43229 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
43230 - ioc->name, rphy));
43231 - }
43232 -
43233 - if (rphy) {
43234 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
43235 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
43236 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
43237 - ioc->name, rphy, rphy->dev.release));
43238 - }
43239 -}
43240 -
43241 static inline struct sas_port *
43242 mptsas_get_port(struct mptsas_phyinfo *phy_info)
43243 {
43244 diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
43245 index 727819c..ad74694 100644
43246 --- a/drivers/message/fusion/mptscsih.c
43247 +++ b/drivers/message/fusion/mptscsih.c
43248 @@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
43249
43250 h = shost_priv(SChost);
43251
43252 - if (h) {
43253 - if (h->info_kbuf == NULL)
43254 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
43255 - return h->info_kbuf;
43256 - h->info_kbuf[0] = '\0';
43257 + if (!h)
43258 + return NULL;
43259
43260 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
43261 - h->info_kbuf[size-1] = '\0';
43262 - }
43263 + if (h->info_kbuf == NULL)
43264 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
43265 + return h->info_kbuf;
43266 + h->info_kbuf[0] = '\0';
43267 +
43268 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
43269 + h->info_kbuf[size-1] = '\0';
43270
43271 return h->info_kbuf;
43272 }
43273 diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
43274 index b7d87cd..3fb36da 100644
43275 --- a/drivers/message/i2o/i2o_proc.c
43276 +++ b/drivers/message/i2o/i2o_proc.c
43277 @@ -255,12 +255,6 @@ static char *scsi_devices[] = {
43278 "Array Controller Device"
43279 };
43280
43281 -static char *chtostr(char *tmp, u8 *chars, int n)
43282 -{
43283 - tmp[0] = 0;
43284 - return strncat(tmp, (char *)chars, n);
43285 -}
43286 -
43287 static int i2o_report_query_status(struct seq_file *seq, int block_status,
43288 char *group)
43289 {
43290 @@ -707,9 +701,9 @@ static int i2o_seq_show_status(struct seq_file *seq, void *v)
43291 static int i2o_seq_show_hw(struct seq_file *seq, void *v)
43292 {
43293 struct i2o_controller *c = (struct i2o_controller *)seq->private;
43294 - static u32 work32[5];
43295 - static u8 *work8 = (u8 *) work32;
43296 - static u16 *work16 = (u16 *) work32;
43297 + u32 work32[5];
43298 + u8 *work8 = (u8 *) work32;
43299 + u16 *work16 = (u16 *) work32;
43300 int token;
43301 u32 hwcap;
43302
43303 @@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
43304 } *result;
43305
43306 i2o_exec_execute_ddm_table ddm_table;
43307 - char tmp[28 + 1];
43308
43309 result = kmalloc(sizeof(*result), GFP_KERNEL);
43310 if (!result)
43311 @@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
43312
43313 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
43314 seq_printf(seq, "%-#8x", ddm_table.module_id);
43315 - seq_printf(seq, "%-29s",
43316 - chtostr(tmp, ddm_table.module_name_version, 28));
43317 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
43318 seq_printf(seq, "%9d ", ddm_table.data_size);
43319 seq_printf(seq, "%8d", ddm_table.code_size);
43320
43321 @@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
43322
43323 i2o_driver_result_table *result;
43324 i2o_driver_store_table *dst;
43325 - char tmp[28 + 1];
43326
43327 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
43328 if (result == NULL)
43329 @@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
43330
43331 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
43332 seq_printf(seq, "%-#8x", dst->module_id);
43333 - seq_printf(seq, "%-29s",
43334 - chtostr(tmp, dst->module_name_version, 28));
43335 - seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
43336 + seq_printf(seq, "%-.28s", dst->module_name_version);
43337 + seq_printf(seq, "%-.8s", dst->date);
43338 seq_printf(seq, "%8d ", dst->module_size);
43339 seq_printf(seq, "%8d ", dst->mpb_size);
43340 seq_printf(seq, "0x%04x", dst->module_flags);
43341 @@ -1246,11 +1236,10 @@ static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
43342 static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
43343 {
43344 struct i2o_device *d = (struct i2o_device *)seq->private;
43345 - static u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
43346 + u32 work32[128]; // allow for "stuff" + up to 256 byte (max) serial number
43347 // == (allow) 512d bytes (max)
43348 - static u16 *work16 = (u16 *) work32;
43349 + u16 *work16 = (u16 *) work32;
43350 int token;
43351 - char tmp[16 + 1];
43352
43353 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
43354
43355 @@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
43356 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
43357 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
43358 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
43359 - seq_printf(seq, "Vendor info : %s\n",
43360 - chtostr(tmp, (u8 *) (work32 + 2), 16));
43361 - seq_printf(seq, "Product info : %s\n",
43362 - chtostr(tmp, (u8 *) (work32 + 6), 16));
43363 - seq_printf(seq, "Description : %s\n",
43364 - chtostr(tmp, (u8 *) (work32 + 10), 16));
43365 - seq_printf(seq, "Product rev. : %s\n",
43366 - chtostr(tmp, (u8 *) (work32 + 14), 8));
43367 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
43368 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
43369 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
43370 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
43371
43372 seq_printf(seq, "Serial number : ");
43373 print_serial_number(seq, (u8 *) (work32 + 16),
43374 @@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
43375 u8 pad[256]; // allow up to 256 byte (max) serial number
43376 } result;
43377
43378 - char tmp[24 + 1];
43379 -
43380 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
43381
43382 if (token < 0) {
43383 @@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
43384 }
43385
43386 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
43387 - seq_printf(seq, "Module name : %s\n",
43388 - chtostr(tmp, result.module_name, 24));
43389 - seq_printf(seq, "Module revision : %s\n",
43390 - chtostr(tmp, result.module_rev, 8));
43391 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
43392 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
43393
43394 seq_printf(seq, "Serial number : ");
43395 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
43396 @@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
43397 u8 instance_number[4];
43398 } result;
43399
43400 - char tmp[64 + 1];
43401 -
43402 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
43403
43404 if (token < 0) {
43405 @@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
43406 return 0;
43407 }
43408
43409 - seq_printf(seq, "Device name : %s\n",
43410 - chtostr(tmp, result.device_name, 64));
43411 - seq_printf(seq, "Service name : %s\n",
43412 - chtostr(tmp, result.service_name, 64));
43413 - seq_printf(seq, "Physical name : %s\n",
43414 - chtostr(tmp, result.physical_location, 64));
43415 - seq_printf(seq, "Instance number : %s\n",
43416 - chtostr(tmp, result.instance_number, 4));
43417 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
43418 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
43419 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
43420 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
43421
43422 return 0;
43423 }
43424 @@ -1368,9 +1343,9 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
43425 static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
43426 {
43427 struct i2o_device *d = (struct i2o_device *)seq->private;
43428 - static u32 work32[12];
43429 - static u16 *work16 = (u16 *) work32;
43430 - static u8 *work8 = (u8 *) work32;
43431 + u32 work32[12];
43432 + u16 *work16 = (u16 *) work32;
43433 + u8 *work8 = (u8 *) work32;
43434 int token;
43435
43436 token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
43437 diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
43438 index a8c08f3..155fe3d 100644
43439 --- a/drivers/message/i2o/iop.c
43440 +++ b/drivers/message/i2o/iop.c
43441 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
43442
43443 spin_lock_irqsave(&c->context_list_lock, flags);
43444
43445 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
43446 - atomic_inc(&c->context_list_counter);
43447 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
43448 + atomic_inc_unchecked(&c->context_list_counter);
43449
43450 - entry->context = atomic_read(&c->context_list_counter);
43451 + entry->context = atomic_read_unchecked(&c->context_list_counter);
43452
43453 list_add(&entry->list, &c->context_list);
43454
43455 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
43456
43457 #if BITS_PER_LONG == 64
43458 spin_lock_init(&c->context_list_lock);
43459 - atomic_set(&c->context_list_counter, 0);
43460 + atomic_set_unchecked(&c->context_list_counter, 0);
43461 INIT_LIST_HEAD(&c->context_list);
43462 #endif
43463
43464 diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
43465 index fcbb2e9..2635e11 100644
43466 --- a/drivers/mfd/janz-cmodio.c
43467 +++ b/drivers/mfd/janz-cmodio.c
43468 @@ -13,6 +13,7 @@
43469
43470 #include <linux/kernel.h>
43471 #include <linux/module.h>
43472 +#include <linux/slab.h>
43473 #include <linux/init.h>
43474 #include <linux/pci.h>
43475 #include <linux/interrupt.h>
43476 diff --git a/drivers/mfd/max8925-i2c.c b/drivers/mfd/max8925-i2c.c
43477 index de7fb80..7c1b931 100644
43478 --- a/drivers/mfd/max8925-i2c.c
43479 +++ b/drivers/mfd/max8925-i2c.c
43480 @@ -152,7 +152,7 @@ static int max8925_probe(struct i2c_client *client,
43481 const struct i2c_device_id *id)
43482 {
43483 struct max8925_platform_data *pdata = dev_get_platdata(&client->dev);
43484 - static struct max8925_chip *chip;
43485 + struct max8925_chip *chip;
43486 struct device_node *node = client->dev.of_node;
43487
43488 if (node && !pdata) {
43489 diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
43490 index d792772..cd73ba3 100644
43491 --- a/drivers/mfd/tps65910.c
43492 +++ b/drivers/mfd/tps65910.c
43493 @@ -229,7 +229,7 @@ static int tps65910_irq_init(struct tps65910 *tps65910, int irq,
43494 struct tps65910_platform_data *pdata)
43495 {
43496 int ret = 0;
43497 - static struct regmap_irq_chip *tps6591x_irqs_chip;
43498 + struct regmap_irq_chip *tps6591x_irqs_chip;
43499
43500 if (!irq) {
43501 dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n");
43502 diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
43503 index 9aa6d1e..1631bfc 100644
43504 --- a/drivers/mfd/twl4030-irq.c
43505 +++ b/drivers/mfd/twl4030-irq.c
43506 @@ -35,6 +35,7 @@
43507 #include <linux/of.h>
43508 #include <linux/irqdomain.h>
43509 #include <linux/i2c/twl.h>
43510 +#include <asm/pgtable.h>
43511
43512 #include "twl-core.h"
43513
43514 @@ -726,10 +727,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
43515 * Install an irq handler for each of the SIH modules;
43516 * clone dummy irq_chip since PIH can't *do* anything
43517 */
43518 - twl4030_irq_chip = dummy_irq_chip;
43519 - twl4030_irq_chip.name = "twl4030";
43520 + pax_open_kernel();
43521 + memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
43522 + *(const char **)&twl4030_irq_chip.name = "twl4030";
43523
43524 - twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
43525 + *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
43526 + pax_close_kernel();
43527
43528 for (i = irq_base; i < irq_end; i++) {
43529 irq_set_chip_and_handler(i, &twl4030_irq_chip,
43530 diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
43531 index 464419b..64bae8d 100644
43532 --- a/drivers/misc/c2port/core.c
43533 +++ b/drivers/misc/c2port/core.c
43534 @@ -922,7 +922,9 @@ struct c2port_device *c2port_device_register(char *name,
43535 goto error_idr_alloc;
43536 c2dev->id = ret;
43537
43538 - bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
43539 + pax_open_kernel();
43540 + *(size_t *)&bin_attr_flash_data.size = ops->blocks_num * ops->block_size;
43541 + pax_close_kernel();
43542
43543 c2dev->dev = device_create(c2port_class, NULL, 0, c2dev,
43544 "c2port%d", c2dev->id);
43545 diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
43546 index 36f5d52..32311c3 100644
43547 --- a/drivers/misc/kgdbts.c
43548 +++ b/drivers/misc/kgdbts.c
43549 @@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
43550 char before[BREAK_INSTR_SIZE];
43551 char after[BREAK_INSTR_SIZE];
43552
43553 - probe_kernel_read(before, (char *)kgdbts_break_test,
43554 + probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
43555 BREAK_INSTR_SIZE);
43556 init_simple_test();
43557 ts.tst = plant_and_detach_test;
43558 @@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
43559 /* Activate test with initial breakpoint */
43560 if (!is_early)
43561 kgdb_breakpoint();
43562 - probe_kernel_read(after, (char *)kgdbts_break_test,
43563 + probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
43564 BREAK_INSTR_SIZE);
43565 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
43566 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
43567 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
43568 index 036effe..b3a6336 100644
43569 --- a/drivers/misc/lis3lv02d/lis3lv02d.c
43570 +++ b/drivers/misc/lis3lv02d/lis3lv02d.c
43571 @@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
43572 * the lid is closed. This leads to interrupts as soon as a little move
43573 * is done.
43574 */
43575 - atomic_inc(&lis3->count);
43576 + atomic_inc_unchecked(&lis3->count);
43577
43578 wake_up_interruptible(&lis3->misc_wait);
43579 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
43580 @@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
43581 if (lis3->pm_dev)
43582 pm_runtime_get_sync(lis3->pm_dev);
43583
43584 - atomic_set(&lis3->count, 0);
43585 + atomic_set_unchecked(&lis3->count, 0);
43586 return 0;
43587 }
43588
43589 @@ -616,7 +616,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
43590 add_wait_queue(&lis3->misc_wait, &wait);
43591 while (true) {
43592 set_current_state(TASK_INTERRUPTIBLE);
43593 - data = atomic_xchg(&lis3->count, 0);
43594 + data = atomic_xchg_unchecked(&lis3->count, 0);
43595 if (data)
43596 break;
43597
43598 @@ -657,7 +657,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
43599 struct lis3lv02d, miscdev);
43600
43601 poll_wait(file, &lis3->misc_wait, wait);
43602 - if (atomic_read(&lis3->count))
43603 + if (atomic_read_unchecked(&lis3->count))
43604 return POLLIN | POLLRDNORM;
43605 return 0;
43606 }
43607 diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
43608 index c439c82..1f20f57 100644
43609 --- a/drivers/misc/lis3lv02d/lis3lv02d.h
43610 +++ b/drivers/misc/lis3lv02d/lis3lv02d.h
43611 @@ -297,7 +297,7 @@ struct lis3lv02d {
43612 struct input_polled_dev *idev; /* input device */
43613 struct platform_device *pdev; /* platform device */
43614 struct regulator_bulk_data regulators[2];
43615 - atomic_t count; /* interrupt count after last read */
43616 + atomic_unchecked_t count; /* interrupt count after last read */
43617 union axis_conversion ac; /* hw -> logical axis */
43618 int mapped_btns[3];
43619
43620 diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
43621 index 2f30bad..c4c13d0 100644
43622 --- a/drivers/misc/sgi-gru/gruhandles.c
43623 +++ b/drivers/misc/sgi-gru/gruhandles.c
43624 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
43625 unsigned long nsec;
43626
43627 nsec = CLKS2NSEC(clks);
43628 - atomic_long_inc(&mcs_op_statistics[op].count);
43629 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
43630 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
43631 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
43632 if (mcs_op_statistics[op].max < nsec)
43633 mcs_op_statistics[op].max = nsec;
43634 }
43635 diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
43636 index 4f76359..cdfcb2e 100644
43637 --- a/drivers/misc/sgi-gru/gruprocfs.c
43638 +++ b/drivers/misc/sgi-gru/gruprocfs.c
43639 @@ -32,9 +32,9 @@
43640
43641 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
43642
43643 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
43644 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
43645 {
43646 - unsigned long val = atomic_long_read(v);
43647 + unsigned long val = atomic_long_read_unchecked(v);
43648
43649 seq_printf(s, "%16lu %s\n", val, id);
43650 }
43651 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
43652
43653 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
43654 for (op = 0; op < mcsop_last; op++) {
43655 - count = atomic_long_read(&mcs_op_statistics[op].count);
43656 - total = atomic_long_read(&mcs_op_statistics[op].total);
43657 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
43658 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
43659 max = mcs_op_statistics[op].max;
43660 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
43661 count ? total / count : 0, max);
43662 diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
43663 index 5c3ce24..4915ccb 100644
43664 --- a/drivers/misc/sgi-gru/grutables.h
43665 +++ b/drivers/misc/sgi-gru/grutables.h
43666 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
43667 * GRU statistics.
43668 */
43669 struct gru_stats_s {
43670 - atomic_long_t vdata_alloc;
43671 - atomic_long_t vdata_free;
43672 - atomic_long_t gts_alloc;
43673 - atomic_long_t gts_free;
43674 - atomic_long_t gms_alloc;
43675 - atomic_long_t gms_free;
43676 - atomic_long_t gts_double_allocate;
43677 - atomic_long_t assign_context;
43678 - atomic_long_t assign_context_failed;
43679 - atomic_long_t free_context;
43680 - atomic_long_t load_user_context;
43681 - atomic_long_t load_kernel_context;
43682 - atomic_long_t lock_kernel_context;
43683 - atomic_long_t unlock_kernel_context;
43684 - atomic_long_t steal_user_context;
43685 - atomic_long_t steal_kernel_context;
43686 - atomic_long_t steal_context_failed;
43687 - atomic_long_t nopfn;
43688 - atomic_long_t asid_new;
43689 - atomic_long_t asid_next;
43690 - atomic_long_t asid_wrap;
43691 - atomic_long_t asid_reuse;
43692 - atomic_long_t intr;
43693 - atomic_long_t intr_cbr;
43694 - atomic_long_t intr_tfh;
43695 - atomic_long_t intr_spurious;
43696 - atomic_long_t intr_mm_lock_failed;
43697 - atomic_long_t call_os;
43698 - atomic_long_t call_os_wait_queue;
43699 - atomic_long_t user_flush_tlb;
43700 - atomic_long_t user_unload_context;
43701 - atomic_long_t user_exception;
43702 - atomic_long_t set_context_option;
43703 - atomic_long_t check_context_retarget_intr;
43704 - atomic_long_t check_context_unload;
43705 - atomic_long_t tlb_dropin;
43706 - atomic_long_t tlb_preload_page;
43707 - atomic_long_t tlb_dropin_fail_no_asid;
43708 - atomic_long_t tlb_dropin_fail_upm;
43709 - atomic_long_t tlb_dropin_fail_invalid;
43710 - atomic_long_t tlb_dropin_fail_range_active;
43711 - atomic_long_t tlb_dropin_fail_idle;
43712 - atomic_long_t tlb_dropin_fail_fmm;
43713 - atomic_long_t tlb_dropin_fail_no_exception;
43714 - atomic_long_t tfh_stale_on_fault;
43715 - atomic_long_t mmu_invalidate_range;
43716 - atomic_long_t mmu_invalidate_page;
43717 - atomic_long_t flush_tlb;
43718 - atomic_long_t flush_tlb_gru;
43719 - atomic_long_t flush_tlb_gru_tgh;
43720 - atomic_long_t flush_tlb_gru_zero_asid;
43721 + atomic_long_unchecked_t vdata_alloc;
43722 + atomic_long_unchecked_t vdata_free;
43723 + atomic_long_unchecked_t gts_alloc;
43724 + atomic_long_unchecked_t gts_free;
43725 + atomic_long_unchecked_t gms_alloc;
43726 + atomic_long_unchecked_t gms_free;
43727 + atomic_long_unchecked_t gts_double_allocate;
43728 + atomic_long_unchecked_t assign_context;
43729 + atomic_long_unchecked_t assign_context_failed;
43730 + atomic_long_unchecked_t free_context;
43731 + atomic_long_unchecked_t load_user_context;
43732 + atomic_long_unchecked_t load_kernel_context;
43733 + atomic_long_unchecked_t lock_kernel_context;
43734 + atomic_long_unchecked_t unlock_kernel_context;
43735 + atomic_long_unchecked_t steal_user_context;
43736 + atomic_long_unchecked_t steal_kernel_context;
43737 + atomic_long_unchecked_t steal_context_failed;
43738 + atomic_long_unchecked_t nopfn;
43739 + atomic_long_unchecked_t asid_new;
43740 + atomic_long_unchecked_t asid_next;
43741 + atomic_long_unchecked_t asid_wrap;
43742 + atomic_long_unchecked_t asid_reuse;
43743 + atomic_long_unchecked_t intr;
43744 + atomic_long_unchecked_t intr_cbr;
43745 + atomic_long_unchecked_t intr_tfh;
43746 + atomic_long_unchecked_t intr_spurious;
43747 + atomic_long_unchecked_t intr_mm_lock_failed;
43748 + atomic_long_unchecked_t call_os;
43749 + atomic_long_unchecked_t call_os_wait_queue;
43750 + atomic_long_unchecked_t user_flush_tlb;
43751 + atomic_long_unchecked_t user_unload_context;
43752 + atomic_long_unchecked_t user_exception;
43753 + atomic_long_unchecked_t set_context_option;
43754 + atomic_long_unchecked_t check_context_retarget_intr;
43755 + atomic_long_unchecked_t check_context_unload;
43756 + atomic_long_unchecked_t tlb_dropin;
43757 + atomic_long_unchecked_t tlb_preload_page;
43758 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
43759 + atomic_long_unchecked_t tlb_dropin_fail_upm;
43760 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
43761 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
43762 + atomic_long_unchecked_t tlb_dropin_fail_idle;
43763 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
43764 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
43765 + atomic_long_unchecked_t tfh_stale_on_fault;
43766 + atomic_long_unchecked_t mmu_invalidate_range;
43767 + atomic_long_unchecked_t mmu_invalidate_page;
43768 + atomic_long_unchecked_t flush_tlb;
43769 + atomic_long_unchecked_t flush_tlb_gru;
43770 + atomic_long_unchecked_t flush_tlb_gru_tgh;
43771 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
43772
43773 - atomic_long_t copy_gpa;
43774 - atomic_long_t read_gpa;
43775 + atomic_long_unchecked_t copy_gpa;
43776 + atomic_long_unchecked_t read_gpa;
43777
43778 - atomic_long_t mesq_receive;
43779 - atomic_long_t mesq_receive_none;
43780 - atomic_long_t mesq_send;
43781 - atomic_long_t mesq_send_failed;
43782 - atomic_long_t mesq_noop;
43783 - atomic_long_t mesq_send_unexpected_error;
43784 - atomic_long_t mesq_send_lb_overflow;
43785 - atomic_long_t mesq_send_qlimit_reached;
43786 - atomic_long_t mesq_send_amo_nacked;
43787 - atomic_long_t mesq_send_put_nacked;
43788 - atomic_long_t mesq_page_overflow;
43789 - atomic_long_t mesq_qf_locked;
43790 - atomic_long_t mesq_qf_noop_not_full;
43791 - atomic_long_t mesq_qf_switch_head_failed;
43792 - atomic_long_t mesq_qf_unexpected_error;
43793 - atomic_long_t mesq_noop_unexpected_error;
43794 - atomic_long_t mesq_noop_lb_overflow;
43795 - atomic_long_t mesq_noop_qlimit_reached;
43796 - atomic_long_t mesq_noop_amo_nacked;
43797 - atomic_long_t mesq_noop_put_nacked;
43798 - atomic_long_t mesq_noop_page_overflow;
43799 + atomic_long_unchecked_t mesq_receive;
43800 + atomic_long_unchecked_t mesq_receive_none;
43801 + atomic_long_unchecked_t mesq_send;
43802 + atomic_long_unchecked_t mesq_send_failed;
43803 + atomic_long_unchecked_t mesq_noop;
43804 + atomic_long_unchecked_t mesq_send_unexpected_error;
43805 + atomic_long_unchecked_t mesq_send_lb_overflow;
43806 + atomic_long_unchecked_t mesq_send_qlimit_reached;
43807 + atomic_long_unchecked_t mesq_send_amo_nacked;
43808 + atomic_long_unchecked_t mesq_send_put_nacked;
43809 + atomic_long_unchecked_t mesq_page_overflow;
43810 + atomic_long_unchecked_t mesq_qf_locked;
43811 + atomic_long_unchecked_t mesq_qf_noop_not_full;
43812 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
43813 + atomic_long_unchecked_t mesq_qf_unexpected_error;
43814 + atomic_long_unchecked_t mesq_noop_unexpected_error;
43815 + atomic_long_unchecked_t mesq_noop_lb_overflow;
43816 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
43817 + atomic_long_unchecked_t mesq_noop_amo_nacked;
43818 + atomic_long_unchecked_t mesq_noop_put_nacked;
43819 + atomic_long_unchecked_t mesq_noop_page_overflow;
43820
43821 };
43822
43823 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
43824 tghop_invalidate, mcsop_last};
43825
43826 struct mcs_op_statistic {
43827 - atomic_long_t count;
43828 - atomic_long_t total;
43829 + atomic_long_unchecked_t count;
43830 + atomic_long_unchecked_t total;
43831 unsigned long max;
43832 };
43833
43834 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
43835
43836 #define STAT(id) do { \
43837 if (gru_options & OPT_STATS) \
43838 - atomic_long_inc(&gru_stats.id); \
43839 + atomic_long_inc_unchecked(&gru_stats.id); \
43840 } while (0)
43841
43842 #ifdef CONFIG_SGI_GRU_DEBUG
43843 diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
43844 index c862cd4..0d176fe 100644
43845 --- a/drivers/misc/sgi-xp/xp.h
43846 +++ b/drivers/misc/sgi-xp/xp.h
43847 @@ -288,7 +288,7 @@ struct xpc_interface {
43848 xpc_notify_func, void *);
43849 void (*received) (short, int, void *);
43850 enum xp_retval (*partid_to_nasids) (short, void *);
43851 -};
43852 +} __no_const;
43853
43854 extern struct xpc_interface xpc_interface;
43855
43856 diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
43857 index b94d5f7..7f494c5 100644
43858 --- a/drivers/misc/sgi-xp/xpc.h
43859 +++ b/drivers/misc/sgi-xp/xpc.h
43860 @@ -835,6 +835,7 @@ struct xpc_arch_operations {
43861 void (*received_payload) (struct xpc_channel *, void *);
43862 void (*notify_senders_of_disconnect) (struct xpc_channel *);
43863 };
43864 +typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
43865
43866 /* struct xpc_partition act_state values (for XPC HB) */
43867
43868 @@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
43869 /* found in xpc_main.c */
43870 extern struct device *xpc_part;
43871 extern struct device *xpc_chan;
43872 -extern struct xpc_arch_operations xpc_arch_ops;
43873 +extern xpc_arch_operations_no_const xpc_arch_ops;
43874 extern int xpc_disengage_timelimit;
43875 extern int xpc_disengage_timedout;
43876 extern int xpc_activate_IRQ_rcvd;
43877 diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
43878 index 82dc574..8539ab2 100644
43879 --- a/drivers/misc/sgi-xp/xpc_main.c
43880 +++ b/drivers/misc/sgi-xp/xpc_main.c
43881 @@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
43882 .notifier_call = xpc_system_die,
43883 };
43884
43885 -struct xpc_arch_operations xpc_arch_ops;
43886 +xpc_arch_operations_no_const xpc_arch_ops;
43887
43888 /*
43889 * Timer function to enforce the timelimit on the partition disengage.
43890 @@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
43891
43892 if (((die_args->trapnr == X86_TRAP_MF) ||
43893 (die_args->trapnr == X86_TRAP_XF)) &&
43894 - !user_mode_vm(die_args->regs))
43895 + !user_mode(die_args->regs))
43896 xpc_die_deactivate();
43897
43898 break;
43899 diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
43900 index ef18348..1b53cf0 100644
43901 --- a/drivers/mmc/core/mmc_ops.c
43902 +++ b/drivers/mmc/core/mmc_ops.c
43903 @@ -213,7 +213,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
43904 void *data_buf;
43905 int is_on_stack;
43906
43907 - is_on_stack = object_is_on_stack(buf);
43908 + is_on_stack = object_starts_on_stack(buf);
43909 if (is_on_stack) {
43910 /*
43911 * dma onto stack is unsafe/nonportable, but callers to this
43912 diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
43913 index 81b2994..dce857e 100644
43914 --- a/drivers/mmc/host/dw_mmc.h
43915 +++ b/drivers/mmc/host/dw_mmc.h
43916 @@ -203,5 +203,5 @@ struct dw_mci_drv_data {
43917 void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
43918 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
43919 int (*parse_dt)(struct dw_mci *host);
43920 -};
43921 +} __do_const;
43922 #endif /* _DW_MMC_H_ */
43923 diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
43924 index c3785ed..1984c44 100644
43925 --- a/drivers/mmc/host/mmci.c
43926 +++ b/drivers/mmc/host/mmci.c
43927 @@ -1482,7 +1482,9 @@ static int mmci_probe(struct amba_device *dev,
43928 }
43929
43930 if (variant->busy_detect) {
43931 - mmci_ops.card_busy = mmci_card_busy;
43932 + pax_open_kernel();
43933 + *(void **)&mmci_ops.card_busy = mmci_card_busy;
43934 + pax_close_kernel();
43935 mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
43936 }
43937
43938 diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
43939 index 6debda9..2ba7427 100644
43940 --- a/drivers/mmc/host/sdhci-s3c.c
43941 +++ b/drivers/mmc/host/sdhci-s3c.c
43942 @@ -668,9 +668,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
43943 * we can use overriding functions instead of default.
43944 */
43945 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
43946 - sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
43947 - sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
43948 - sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
43949 + pax_open_kernel();
43950 + *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
43951 + *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
43952 + *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
43953 + pax_close_kernel();
43954 }
43955
43956 /* It supports additional host capabilities if needed */
43957 diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
43958 index 096993f..f02c23b 100644
43959 --- a/drivers/mtd/chips/cfi_cmdset_0020.c
43960 +++ b/drivers/mtd/chips/cfi_cmdset_0020.c
43961 @@ -669,7 +669,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs,
43962 size_t totlen = 0, thislen;
43963 int ret = 0;
43964 size_t buflen = 0;
43965 - static char *buffer;
43966 + char *buffer;
43967
43968 if (!ECCBUF_SIZE) {
43969 /* We should fall back to a general writev implementation.
43970 diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
43971 index 2ed2bb3..2d0b82e 100644
43972 --- a/drivers/mtd/nand/denali.c
43973 +++ b/drivers/mtd/nand/denali.c
43974 @@ -24,6 +24,7 @@
43975 #include <linux/slab.h>
43976 #include <linux/mtd/mtd.h>
43977 #include <linux/module.h>
43978 +#include <linux/slab.h>
43979
43980 #include "denali.h"
43981
43982 diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
43983 index 51b9d6a..52af9a7 100644
43984 --- a/drivers/mtd/nftlmount.c
43985 +++ b/drivers/mtd/nftlmount.c
43986 @@ -24,6 +24,7 @@
43987 #include <asm/errno.h>
43988 #include <linux/delay.h>
43989 #include <linux/slab.h>
43990 +#include <linux/sched.h>
43991 #include <linux/mtd/mtd.h>
43992 #include <linux/mtd/nand.h>
43993 #include <linux/mtd/nftl.h>
43994 diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
43995 index 4b8e895..6b3c498 100644
43996 --- a/drivers/mtd/sm_ftl.c
43997 +++ b/drivers/mtd/sm_ftl.c
43998 @@ -56,7 +56,7 @@ static ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
43999 #define SM_CIS_VENDOR_OFFSET 0x59
44000 static struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
44001 {
44002 - struct attribute_group *attr_group;
44003 + attribute_group_no_const *attr_group;
44004 struct attribute **attributes;
44005 struct sm_sysfs_attribute *vendor_attribute;
44006
44007 diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
44008 index dd8057d..22aaf36 100644
44009 --- a/drivers/net/bonding/bond_main.c
44010 +++ b/drivers/net/bonding/bond_main.c
44011 @@ -4511,7 +4511,7 @@ static unsigned int bond_get_num_tx_queues(void)
44012 return tx_queues;
44013 }
44014
44015 -static struct rtnl_link_ops bond_link_ops __read_mostly = {
44016 +static struct rtnl_link_ops bond_link_ops = {
44017 .kind = "bond",
44018 .priv_size = sizeof(struct bonding),
44019 .setup = bond_setup,
44020 @@ -4636,8 +4636,8 @@ static void __exit bonding_exit(void)
44021
44022 bond_destroy_debugfs();
44023
44024 - rtnl_link_unregister(&bond_link_ops);
44025 unregister_pernet_subsys(&bond_net_ops);
44026 + rtnl_link_unregister(&bond_link_ops);
44027
44028 #ifdef CONFIG_NET_POLL_CONTROLLER
44029 /*
44030 diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
44031 index f92f001..0b2f9bf 100644
44032 --- a/drivers/net/ethernet/8390/ax88796.c
44033 +++ b/drivers/net/ethernet/8390/ax88796.c
44034 @@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
44035 if (ax->plat->reg_offsets)
44036 ei_local->reg_offset = ax->plat->reg_offsets;
44037 else {
44038 + resource_size_t _mem_size = mem_size;
44039 + do_div(_mem_size, 0x18);
44040 ei_local->reg_offset = ax->reg_offsets;
44041 for (ret = 0; ret < 0x18; ret++)
44042 - ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
44043 + ax->reg_offsets[ret] = _mem_size * ret;
44044 }
44045
44046 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
44047 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
44048 index da8fcaa..f4b5d3b 100644
44049 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
44050 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
44051 @@ -1138,7 +1138,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
44052 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
44053 {
44054 /* RX_MODE controlling object */
44055 - bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
44056 + bnx2x_init_rx_mode_obj(bp);
44057
44058 /* multicast configuration controlling object */
44059 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
44060 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
44061 index 9fbeee5..5e3e37a 100644
44062 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
44063 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
44064 @@ -2590,15 +2590,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
44065 return rc;
44066 }
44067
44068 -void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
44069 - struct bnx2x_rx_mode_obj *o)
44070 +void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
44071 {
44072 if (CHIP_IS_E1x(bp)) {
44073 - o->wait_comp = bnx2x_empty_rx_mode_wait;
44074 - o->config_rx_mode = bnx2x_set_rx_mode_e1x;
44075 + bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
44076 + bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
44077 } else {
44078 - o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
44079 - o->config_rx_mode = bnx2x_set_rx_mode_e2;
44080 + bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
44081 + bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
44082 }
44083 }
44084
44085 diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
44086 index 658f4e3..15074a6 100644
44087 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
44088 +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
44089 @@ -1325,8 +1325,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
44090
44091 /********************* RX MODE ****************/
44092
44093 -void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
44094 - struct bnx2x_rx_mode_obj *o);
44095 +void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
44096
44097 /**
44098 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
44099 diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
44100 index 7025780..e55a71c 100644
44101 --- a/drivers/net/ethernet/broadcom/tg3.h
44102 +++ b/drivers/net/ethernet/broadcom/tg3.h
44103 @@ -147,6 +147,7 @@
44104 #define CHIPREV_ID_5750_A0 0x4000
44105 #define CHIPREV_ID_5750_A1 0x4001
44106 #define CHIPREV_ID_5750_A3 0x4003
44107 +#define CHIPREV_ID_5750_C1 0x4201
44108 #define CHIPREV_ID_5750_C2 0x4202
44109 #define CHIPREV_ID_5752_A0_HW 0x5000
44110 #define CHIPREV_ID_5752_A0 0x6000
44111 diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
44112 index 8cffcdf..aadf043 100644
44113 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
44114 +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
44115 @@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
44116 */
44117 struct l2t_skb_cb {
44118 arp_failure_handler_func arp_failure_handler;
44119 -};
44120 +} __no_const;
44121
44122 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
44123
44124 diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
44125 index c73cabd..cd278b1 100644
44126 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
44127 +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
44128 @@ -2186,7 +2186,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
44129
44130 int i;
44131 struct adapter *ap = netdev2adap(dev);
44132 - static const unsigned int *reg_ranges;
44133 + const unsigned int *reg_ranges;
44134 int arr_size = 0, buf_size = 0;
44135
44136 if (is_t4(ap->chip)) {
44137 diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
44138 index 263b92c..f05134b 100644
44139 --- a/drivers/net/ethernet/dec/tulip/de4x5.c
44140 +++ b/drivers/net/ethernet/dec/tulip/de4x5.c
44141 @@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
44142 for (i=0; i<ETH_ALEN; i++) {
44143 tmp.addr[i] = dev->dev_addr[i];
44144 }
44145 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
44146 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
44147 break;
44148
44149 case DE4X5_SET_HWADDR: /* Set the hardware address */
44150 @@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
44151 spin_lock_irqsave(&lp->lock, flags);
44152 memcpy(&statbuf, &lp->pktStats, ioc->len);
44153 spin_unlock_irqrestore(&lp->lock, flags);
44154 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
44155 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
44156 return -EFAULT;
44157 break;
44158 }
44159 diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
44160 index 2c38cc4..0323f6e 100644
44161 --- a/drivers/net/ethernet/emulex/benet/be_main.c
44162 +++ b/drivers/net/ethernet/emulex/benet/be_main.c
44163 @@ -470,7 +470,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
44164
44165 if (wrapped)
44166 newacc += 65536;
44167 - ACCESS_ONCE(*acc) = newacc;
44168 + ACCESS_ONCE_RW(*acc) = newacc;
44169 }
44170
44171 static void populate_erx_stats(struct be_adapter *adapter,
44172 diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
44173 index 212f44b..fb69959 100644
44174 --- a/drivers/net/ethernet/faraday/ftgmac100.c
44175 +++ b/drivers/net/ethernet/faraday/ftgmac100.c
44176 @@ -31,6 +31,8 @@
44177 #include <linux/netdevice.h>
44178 #include <linux/phy.h>
44179 #include <linux/platform_device.h>
44180 +#include <linux/interrupt.h>
44181 +#include <linux/irqreturn.h>
44182 #include <net/ip.h>
44183
44184 #include "ftgmac100.h"
44185 diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
44186 index 8be5b40..081bc1b 100644
44187 --- a/drivers/net/ethernet/faraday/ftmac100.c
44188 +++ b/drivers/net/ethernet/faraday/ftmac100.c
44189 @@ -31,6 +31,8 @@
44190 #include <linux/module.h>
44191 #include <linux/netdevice.h>
44192 #include <linux/platform_device.h>
44193 +#include <linux/interrupt.h>
44194 +#include <linux/irqreturn.h>
44195
44196 #include "ftmac100.h"
44197
44198 diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
44199 index 5184e2a..acb28c3 100644
44200 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
44201 +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
44202 @@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
44203 }
44204
44205 /* update the base incval used to calculate frequency adjustment */
44206 - ACCESS_ONCE(adapter->base_incval) = incval;
44207 + ACCESS_ONCE_RW(adapter->base_incval) = incval;
44208 smp_mb();
44209
44210 /* need lock to prevent incorrect read while modifying cyclecounter */
44211 diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
44212 index fbe5363..266b4e3 100644
44213 --- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
44214 +++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
44215 @@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
44216 struct __vxge_hw_fifo *fifo;
44217 struct vxge_hw_fifo_config *config;
44218 u32 txdl_size, txdl_per_memblock;
44219 - struct vxge_hw_mempool_cbs fifo_mp_callback;
44220 + static struct vxge_hw_mempool_cbs fifo_mp_callback = {
44221 + .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
44222 + };
44223 +
44224 struct __vxge_hw_virtualpath *vpath;
44225
44226 if ((vp == NULL) || (attr == NULL)) {
44227 @@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
44228 goto exit;
44229 }
44230
44231 - fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
44232 -
44233 fifo->mempool =
44234 __vxge_hw_mempool_create(vpath->hldev,
44235 fifo->config->memblock_size,
44236 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
44237 index f09e787..f3916a8 100644
44238 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
44239 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
44240 @@ -2055,7 +2055,9 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
44241
44242 } else if (ret == QLC_83XX_DEFAULT_OPMODE) {
44243 ahw->nic_mode = QLC_83XX_DEFAULT_MODE;
44244 - adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
44245 + pax_open_kernel();
44246 + *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
44247 + pax_close_kernel();
44248 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
44249 } else {
44250 return -EIO;
44251 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
44252 index 0248a4c..9648d96 100644
44253 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
44254 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
44255 @@ -191,17 +191,23 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
44256 case QLCNIC_NON_PRIV_FUNC:
44257 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
44258 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
44259 - nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
44260 + pax_open_kernel();
44261 + *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
44262 + pax_close_kernel();
44263 break;
44264 case QLCNIC_PRIV_FUNC:
44265 ahw->op_mode = QLCNIC_PRIV_FUNC;
44266 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
44267 - nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
44268 + pax_open_kernel();
44269 + *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
44270 + pax_close_kernel();
44271 break;
44272 case QLCNIC_MGMT_FUNC:
44273 ahw->op_mode = QLCNIC_MGMT_FUNC;
44274 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
44275 - nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
44276 + pax_open_kernel();
44277 + *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
44278 + pax_close_kernel();
44279 break;
44280 default:
44281 dev_err(&adapter->pdev->dev, "Invalid Virtual NIC opmode\n");
44282 diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
44283 index 1551360..ed6510f 100644
44284 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
44285 +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
44286 @@ -1108,7 +1108,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
44287 struct qlcnic_dump_entry *entry;
44288 struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
44289 struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
44290 - static const struct qlcnic_dump_operations *fw_dump_ops;
44291 + const struct qlcnic_dump_operations *fw_dump_ops;
44292 struct device *dev = &adapter->pdev->dev;
44293 struct qlcnic_hardware_context *ahw;
44294 void *temp_buffer;
44295 diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
44296 index fb3f8dc..9d2ff38 100644
44297 --- a/drivers/net/ethernet/realtek/r8169.c
44298 +++ b/drivers/net/ethernet/realtek/r8169.c
44299 @@ -759,22 +759,22 @@ struct rtl8169_private {
44300 struct mdio_ops {
44301 void (*write)(struct rtl8169_private *, int, int);
44302 int (*read)(struct rtl8169_private *, int);
44303 - } mdio_ops;
44304 + } __no_const mdio_ops;
44305
44306 struct pll_power_ops {
44307 void (*down)(struct rtl8169_private *);
44308 void (*up)(struct rtl8169_private *);
44309 - } pll_power_ops;
44310 + } __no_const pll_power_ops;
44311
44312 struct jumbo_ops {
44313 void (*enable)(struct rtl8169_private *);
44314 void (*disable)(struct rtl8169_private *);
44315 - } jumbo_ops;
44316 + } __no_const jumbo_ops;
44317
44318 struct csi_ops {
44319 void (*write)(struct rtl8169_private *, int, int);
44320 u32 (*read)(struct rtl8169_private *, int);
44321 - } csi_ops;
44322 + } __no_const csi_ops;
44323
44324 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
44325 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
44326 diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
44327 index 03acf57..e1251ff 100644
44328 --- a/drivers/net/ethernet/sfc/ptp.c
44329 +++ b/drivers/net/ethernet/sfc/ptp.c
44330 @@ -539,7 +539,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
44331 ptp->start.dma_addr);
44332
44333 /* Clear flag that signals MC ready */
44334 - ACCESS_ONCE(*start) = 0;
44335 + ACCESS_ONCE_RW(*start) = 0;
44336 rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
44337 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
44338 EFX_BUG_ON_PARANOID(rc);
44339 diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
44340 index 50617c5..b13724c 100644
44341 --- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
44342 +++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
44343 @@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
44344
44345 writel(value, ioaddr + MMC_CNTRL);
44346
44347 - pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
44348 - MMC_CNTRL, value);
44349 +// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
44350 +// MMC_CNTRL, value);
44351 }
44352
44353 /* To mask all all interrupts.*/
44354 diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
44355 index 3169252..5d78c1d 100644
44356 --- a/drivers/net/hamradio/hdlcdrv.c
44357 +++ b/drivers/net/hamradio/hdlcdrv.c
44358 @@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44359 case HDLCDRVCTL_CALIBRATE:
44360 if(!capable(CAP_SYS_RAWIO))
44361 return -EPERM;
44362 + if (bi.data.calibrate > INT_MAX / s->par.bitrate)
44363 + return -EINVAL;
44364 s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
44365 return 0;
44366
44367 diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
44368 index e6fe0d8..2b7d752 100644
44369 --- a/drivers/net/hyperv/hyperv_net.h
44370 +++ b/drivers/net/hyperv/hyperv_net.h
44371 @@ -101,7 +101,7 @@ struct rndis_device {
44372
44373 enum rndis_device_state state;
44374 bool link_state;
44375 - atomic_t new_req_id;
44376 + atomic_unchecked_t new_req_id;
44377
44378 spinlock_t request_lock;
44379 struct list_head req_list;
44380 diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
44381 index 0775f0a..d4fb316 100644
44382 --- a/drivers/net/hyperv/rndis_filter.c
44383 +++ b/drivers/net/hyperv/rndis_filter.c
44384 @@ -104,7 +104,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
44385 * template
44386 */
44387 set = &rndis_msg->msg.set_req;
44388 - set->req_id = atomic_inc_return(&dev->new_req_id);
44389 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
44390
44391 /* Add to the request list */
44392 spin_lock_irqsave(&dev->request_lock, flags);
44393 @@ -752,7 +752,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
44394
44395 /* Setup the rndis set */
44396 halt = &request->request_msg.msg.halt_req;
44397 - halt->req_id = atomic_inc_return(&dev->new_req_id);
44398 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
44399
44400 /* Ignore return since this msg is optional. */
44401 rndis_filter_send_request(dev, request);
44402 diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
44403 index bf0d55e..82bcfbd1 100644
44404 --- a/drivers/net/ieee802154/fakehard.c
44405 +++ b/drivers/net/ieee802154/fakehard.c
44406 @@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
44407 phy->transmit_power = 0xbf;
44408
44409 dev->netdev_ops = &fake_ops;
44410 - dev->ml_priv = &fake_mlme;
44411 + dev->ml_priv = (void *)&fake_mlme;
44412
44413 priv = netdev_priv(dev);
44414 priv->phy = phy;
44415 diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
44416 index 9bf46bd..bfdaa84 100644
44417 --- a/drivers/net/macvlan.c
44418 +++ b/drivers/net/macvlan.c
44419 @@ -939,13 +939,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
44420 int macvlan_link_register(struct rtnl_link_ops *ops)
44421 {
44422 /* common fields */
44423 - ops->priv_size = sizeof(struct macvlan_dev);
44424 - ops->validate = macvlan_validate;
44425 - ops->maxtype = IFLA_MACVLAN_MAX;
44426 - ops->policy = macvlan_policy;
44427 - ops->changelink = macvlan_changelink;
44428 - ops->get_size = macvlan_get_size;
44429 - ops->fill_info = macvlan_fill_info;
44430 + pax_open_kernel();
44431 + *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
44432 + *(void **)&ops->validate = macvlan_validate;
44433 + *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
44434 + *(const void **)&ops->policy = macvlan_policy;
44435 + *(void **)&ops->changelink = macvlan_changelink;
44436 + *(void **)&ops->get_size = macvlan_get_size;
44437 + *(void **)&ops->fill_info = macvlan_fill_info;
44438 + pax_close_kernel();
44439
44440 return rtnl_link_register(ops);
44441 };
44442 @@ -1001,7 +1003,7 @@ static int macvlan_device_event(struct notifier_block *unused,
44443 return NOTIFY_DONE;
44444 }
44445
44446 -static struct notifier_block macvlan_notifier_block __read_mostly = {
44447 +static struct notifier_block macvlan_notifier_block = {
44448 .notifier_call = macvlan_device_event,
44449 };
44450
44451 diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
44452 index dc76670..e18f39c 100644
44453 --- a/drivers/net/macvtap.c
44454 +++ b/drivers/net/macvtap.c
44455 @@ -1189,7 +1189,7 @@ static int macvtap_device_event(struct notifier_block *unused,
44456 return NOTIFY_DONE;
44457 }
44458
44459 -static struct notifier_block macvtap_notifier_block __read_mostly = {
44460 +static struct notifier_block macvtap_notifier_block = {
44461 .notifier_call = macvtap_device_event,
44462 };
44463
44464 diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
44465 index daec9b0..6428fcb 100644
44466 --- a/drivers/net/phy/mdio-bitbang.c
44467 +++ b/drivers/net/phy/mdio-bitbang.c
44468 @@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
44469 struct mdiobb_ctrl *ctrl = bus->priv;
44470
44471 module_put(ctrl->ops->owner);
44472 + mdiobus_unregister(bus);
44473 mdiobus_free(bus);
44474 }
44475 EXPORT_SYMBOL(free_mdio_bitbang);
44476 diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
44477 index 72ff14b..11d442d 100644
44478 --- a/drivers/net/ppp/ppp_generic.c
44479 +++ b/drivers/net/ppp/ppp_generic.c
44480 @@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44481 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
44482 struct ppp_stats stats;
44483 struct ppp_comp_stats cstats;
44484 - char *vers;
44485
44486 switch (cmd) {
44487 case SIOCGPPPSTATS:
44488 @@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44489 break;
44490
44491 case SIOCGPPPVER:
44492 - vers = PPP_VERSION;
44493 - if (copy_to_user(addr, vers, strlen(vers) + 1))
44494 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
44495 break;
44496 err = 0;
44497 break;
44498 diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
44499 index 1252d9c..80e660b 100644
44500 --- a/drivers/net/slip/slhc.c
44501 +++ b/drivers/net/slip/slhc.c
44502 @@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
44503 register struct tcphdr *thp;
44504 register struct iphdr *ip;
44505 register struct cstate *cs;
44506 - int len, hdrlen;
44507 + long len, hdrlen;
44508 unsigned char *cp = icp;
44509
44510 /* We've got a compressed packet; read the change byte */
44511 diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
44512 index 6327df2..e6e1ebe 100644
44513 --- a/drivers/net/team/team.c
44514 +++ b/drivers/net/team/team.c
44515 @@ -2873,7 +2873,7 @@ static int team_device_event(struct notifier_block *unused,
44516 return NOTIFY_DONE;
44517 }
44518
44519 -static struct notifier_block team_notifier_block __read_mostly = {
44520 +static struct notifier_block team_notifier_block = {
44521 .notifier_call = team_device_event,
44522 };
44523
44524 diff --git a/drivers/net/tun.c b/drivers/net/tun.c
44525 index 782e38b..d076fdc 100644
44526 --- a/drivers/net/tun.c
44527 +++ b/drivers/net/tun.c
44528 @@ -1834,7 +1834,7 @@ unlock:
44529 }
44530
44531 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
44532 - unsigned long arg, int ifreq_len)
44533 + unsigned long arg, size_t ifreq_len)
44534 {
44535 struct tun_file *tfile = file->private_data;
44536 struct tun_struct *tun;
44537 @@ -1847,6 +1847,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
44538 unsigned int ifindex;
44539 int ret;
44540
44541 + if (ifreq_len > sizeof ifr)
44542 + return -EFAULT;
44543 +
44544 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
44545 if (copy_from_user(&ifr, argp, ifreq_len))
44546 return -EFAULT;
44547 diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
44548 index 86292e6..8d34433 100644
44549 --- a/drivers/net/usb/hso.c
44550 +++ b/drivers/net/usb/hso.c
44551 @@ -71,7 +71,7 @@
44552 #include <asm/byteorder.h>
44553 #include <linux/serial_core.h>
44554 #include <linux/serial.h>
44555 -
44556 +#include <asm/local.h>
44557
44558 #define MOD_AUTHOR "Option Wireless"
44559 #define MOD_DESCRIPTION "USB High Speed Option driver"
44560 @@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
44561 struct urb *urb;
44562
44563 urb = serial->rx_urb[0];
44564 - if (serial->port.count > 0) {
44565 + if (atomic_read(&serial->port.count) > 0) {
44566 count = put_rxbuf_data(urb, serial);
44567 if (count == -1)
44568 return;
44569 @@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
44570 DUMP1(urb->transfer_buffer, urb->actual_length);
44571
44572 /* Anyone listening? */
44573 - if (serial->port.count == 0)
44574 + if (atomic_read(&serial->port.count) == 0)
44575 return;
44576
44577 if (status == 0) {
44578 @@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
44579 tty_port_tty_set(&serial->port, tty);
44580
44581 /* check for port already opened, if not set the termios */
44582 - serial->port.count++;
44583 - if (serial->port.count == 1) {
44584 + if (atomic_inc_return(&serial->port.count) == 1) {
44585 serial->rx_state = RX_IDLE;
44586 /* Force default termio settings */
44587 _hso_serial_set_termios(tty, NULL);
44588 @@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
44589 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
44590 if (result) {
44591 hso_stop_serial_device(serial->parent);
44592 - serial->port.count--;
44593 + atomic_dec(&serial->port.count);
44594 kref_put(&serial->parent->ref, hso_serial_ref_free);
44595 }
44596 } else {
44597 @@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
44598
44599 /* reset the rts and dtr */
44600 /* do the actual close */
44601 - serial->port.count--;
44602 + atomic_dec(&serial->port.count);
44603
44604 - if (serial->port.count <= 0) {
44605 - serial->port.count = 0;
44606 + if (atomic_read(&serial->port.count) <= 0) {
44607 + atomic_set(&serial->port.count, 0);
44608 tty_port_tty_set(&serial->port, NULL);
44609 if (!usb_gone)
44610 hso_stop_serial_device(serial->parent);
44611 @@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
44612
44613 /* the actual setup */
44614 spin_lock_irqsave(&serial->serial_lock, flags);
44615 - if (serial->port.count)
44616 + if (atomic_read(&serial->port.count))
44617 _hso_serial_set_termios(tty, old);
44618 else
44619 tty->termios = *old;
44620 @@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
44621 D1("Pending read interrupt on port %d\n", i);
44622 spin_lock(&serial->serial_lock);
44623 if (serial->rx_state == RX_IDLE &&
44624 - serial->port.count > 0) {
44625 + atomic_read(&serial->port.count) > 0) {
44626 /* Setup and send a ctrl req read on
44627 * port i */
44628 if (!serial->rx_urb_filled[0]) {
44629 @@ -3062,7 +3061,7 @@ static int hso_resume(struct usb_interface *iface)
44630 /* Start all serial ports */
44631 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
44632 if (serial_table[i] && (serial_table[i]->interface == iface)) {
44633 - if (dev2ser(serial_table[i])->port.count) {
44634 + if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
44635 result =
44636 hso_start_serial_device(serial_table[i], GFP_NOIO);
44637 hso_kick_transmit(dev2ser(serial_table[i]));
44638 diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
44639 index a79e9d3..78cd4fa 100644
44640 --- a/drivers/net/usb/sierra_net.c
44641 +++ b/drivers/net/usb/sierra_net.c
44642 @@ -52,7 +52,7 @@ static const char driver_name[] = "sierra_net";
44643 /* atomic counter partially included in MAC address to make sure 2 devices
44644 * do not end up with the same MAC - concept breaks in case of > 255 ifaces
44645 */
44646 -static atomic_t iface_counter = ATOMIC_INIT(0);
44647 +static atomic_unchecked_t iface_counter = ATOMIC_INIT(0);
44648
44649 /*
44650 * SYNC Timer Delay definition used to set the expiry time
44651 @@ -698,7 +698,7 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf)
44652 dev->net->netdev_ops = &sierra_net_device_ops;
44653
44654 /* change MAC addr to include, ifacenum, and to be unique */
44655 - dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return(&iface_counter);
44656 + dev->net->dev_addr[ETH_ALEN-2] = atomic_inc_return_unchecked(&iface_counter);
44657 dev->net->dev_addr[ETH_ALEN-1] = ifacenum;
44658
44659 /* we will have to manufacture ethernet headers, prepare template */
44660 diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
44661 index 2ef5b62..6fa0ec3 100644
44662 --- a/drivers/net/vxlan.c
44663 +++ b/drivers/net/vxlan.c
44664 @@ -2615,7 +2615,7 @@ nla_put_failure:
44665 return -EMSGSIZE;
44666 }
44667
44668 -static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
44669 +static struct rtnl_link_ops vxlan_link_ops = {
44670 .kind = "vxlan",
44671 .maxtype = IFLA_VXLAN_MAX,
44672 .policy = vxlan_policy,
44673 diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
44674 index 0b60295..b8bfa5b 100644
44675 --- a/drivers/net/wimax/i2400m/rx.c
44676 +++ b/drivers/net/wimax/i2400m/rx.c
44677 @@ -1359,7 +1359,7 @@ int i2400m_rx_setup(struct i2400m *i2400m)
44678 if (i2400m->rx_roq == NULL)
44679 goto error_roq_alloc;
44680
44681 - rd = kcalloc(I2400M_RO_CIN + 1, sizeof(*i2400m->rx_roq[0].log),
44682 + rd = kcalloc(sizeof(*i2400m->rx_roq[0].log), I2400M_RO_CIN + 1,
44683 GFP_KERNEL);
44684 if (rd == NULL) {
44685 result = -ENOMEM;
44686 diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
44687 index 7fe1964..7016de0 100644
44688 --- a/drivers/net/wireless/airo.c
44689 +++ b/drivers/net/wireless/airo.c
44690 @@ -7844,7 +7844,7 @@ static int writerids(struct net_device *dev, aironet_ioctl *comp) {
44691 struct airo_info *ai = dev->ml_priv;
44692 int ridcode;
44693 int enabled;
44694 - static int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
44695 + int (* writer)(struct airo_info *, u16 rid, const void *, int, int);
44696 unsigned char *iobuf;
44697
44698 /* Only super-user can write RIDs */
44699 diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
44700 index 34c8a33..3261fdc 100644
44701 --- a/drivers/net/wireless/at76c50x-usb.c
44702 +++ b/drivers/net/wireless/at76c50x-usb.c
44703 @@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
44704 }
44705
44706 /* Convert timeout from the DFU status to jiffies */
44707 -static inline unsigned long at76_get_timeout(struct dfu_status *s)
44708 +static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
44709 {
44710 return msecs_to_jiffies((s->poll_timeout[2] << 16)
44711 | (s->poll_timeout[1] << 8)
44712 diff --git a/drivers/net/wireless/ath/ath10k/htc.c b/drivers/net/wireless/ath/ath10k/htc.c
44713 index ef3329e..c28ff5d 100644
44714 --- a/drivers/net/wireless/ath/ath10k/htc.c
44715 +++ b/drivers/net/wireless/ath/ath10k/htc.c
44716 @@ -963,7 +963,10 @@ void ath10k_htc_stop(struct ath10k_htc *htc)
44717 /* registered target arrival callback from the HIF layer */
44718 int ath10k_htc_init(struct ath10k *ar)
44719 {
44720 - struct ath10k_hif_cb htc_callbacks;
44721 + static struct ath10k_hif_cb htc_callbacks = {
44722 + .rx_completion = ath10k_htc_rx_completion_handler,
44723 + .tx_completion = ath10k_htc_tx_completion_handler,
44724 + };
44725 struct ath10k_htc_ep *ep = NULL;
44726 struct ath10k_htc *htc = &ar->htc;
44727
44728 @@ -973,8 +976,6 @@ int ath10k_htc_init(struct ath10k *ar)
44729 ath10k_htc_reset_endpoint_states(htc);
44730
44731 /* setup HIF layer callbacks */
44732 - htc_callbacks.rx_completion = ath10k_htc_rx_completion_handler;
44733 - htc_callbacks.tx_completion = ath10k_htc_tx_completion_handler;
44734 htc->ar = ar;
44735
44736 /* Get HIF default pipe for HTC message exchange */
44737 diff --git a/drivers/net/wireless/ath/ath10k/htc.h b/drivers/net/wireless/ath/ath10k/htc.h
44738 index e1dd8c7..9f91b3f 100644
44739 --- a/drivers/net/wireless/ath/ath10k/htc.h
44740 +++ b/drivers/net/wireless/ath/ath10k/htc.h
44741 @@ -271,12 +271,12 @@ enum ath10k_htc_ep_id {
44742
44743 struct ath10k_htc_ops {
44744 void (*target_send_suspend_complete)(struct ath10k *ar);
44745 -};
44746 +} __no_const;
44747
44748 struct ath10k_htc_ep_ops {
44749 void (*ep_tx_complete)(struct ath10k *, struct sk_buff *);
44750 void (*ep_rx_complete)(struct ath10k *, struct sk_buff *);
44751 -};
44752 +} __no_const;
44753
44754 /* service connection information */
44755 struct ath10k_htc_svc_conn_req {
44756 diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
44757 index 8d78253..bebbb68 100644
44758 --- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
44759 +++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
44760 @@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44761 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
44762 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
44763
44764 - ACCESS_ONCE(ads->ds_link) = i->link;
44765 - ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
44766 + ACCESS_ONCE_RW(ads->ds_link) = i->link;
44767 + ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
44768
44769 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
44770 ctl6 = SM(i->keytype, AR_EncrType);
44771 @@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44772
44773 if ((i->is_first || i->is_last) &&
44774 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
44775 - ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
44776 + ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
44777 | set11nTries(i->rates, 1)
44778 | set11nTries(i->rates, 2)
44779 | set11nTries(i->rates, 3)
44780 | (i->dur_update ? AR_DurUpdateEna : 0)
44781 | SM(0, AR_BurstDur);
44782
44783 - ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
44784 + ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
44785 | set11nRate(i->rates, 1)
44786 | set11nRate(i->rates, 2)
44787 | set11nRate(i->rates, 3);
44788 } else {
44789 - ACCESS_ONCE(ads->ds_ctl2) = 0;
44790 - ACCESS_ONCE(ads->ds_ctl3) = 0;
44791 + ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
44792 + ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
44793 }
44794
44795 if (!i->is_first) {
44796 - ACCESS_ONCE(ads->ds_ctl0) = 0;
44797 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
44798 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
44799 + ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
44800 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
44801 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
44802 return;
44803 }
44804
44805 @@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44806 break;
44807 }
44808
44809 - ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
44810 + ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
44811 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
44812 | SM(i->txpower, AR_XmitPower)
44813 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
44814 @@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44815 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
44816 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
44817
44818 - ACCESS_ONCE(ads->ds_ctl1) = ctl1;
44819 - ACCESS_ONCE(ads->ds_ctl6) = ctl6;
44820 + ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
44821 + ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
44822
44823 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
44824 return;
44825
44826 - ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
44827 + ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
44828 | set11nPktDurRTSCTS(i->rates, 1);
44829
44830 - ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
44831 + ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
44832 | set11nPktDurRTSCTS(i->rates, 3);
44833
44834 - ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
44835 + ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
44836 | set11nRateFlags(i->rates, 1)
44837 | set11nRateFlags(i->rates, 2)
44838 | set11nRateFlags(i->rates, 3)
44839 diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
44840 index f6c5c1b..6058354 100644
44841 --- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
44842 +++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
44843 @@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44844 (i->qcu << AR_TxQcuNum_S) | desc_len;
44845
44846 checksum += val;
44847 - ACCESS_ONCE(ads->info) = val;
44848 + ACCESS_ONCE_RW(ads->info) = val;
44849
44850 checksum += i->link;
44851 - ACCESS_ONCE(ads->link) = i->link;
44852 + ACCESS_ONCE_RW(ads->link) = i->link;
44853
44854 checksum += i->buf_addr[0];
44855 - ACCESS_ONCE(ads->data0) = i->buf_addr[0];
44856 + ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
44857 checksum += i->buf_addr[1];
44858 - ACCESS_ONCE(ads->data1) = i->buf_addr[1];
44859 + ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
44860 checksum += i->buf_addr[2];
44861 - ACCESS_ONCE(ads->data2) = i->buf_addr[2];
44862 + ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
44863 checksum += i->buf_addr[3];
44864 - ACCESS_ONCE(ads->data3) = i->buf_addr[3];
44865 + ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
44866
44867 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
44868 - ACCESS_ONCE(ads->ctl3) = val;
44869 + ACCESS_ONCE_RW(ads->ctl3) = val;
44870 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
44871 - ACCESS_ONCE(ads->ctl5) = val;
44872 + ACCESS_ONCE_RW(ads->ctl5) = val;
44873 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
44874 - ACCESS_ONCE(ads->ctl7) = val;
44875 + ACCESS_ONCE_RW(ads->ctl7) = val;
44876 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
44877 - ACCESS_ONCE(ads->ctl9) = val;
44878 + ACCESS_ONCE_RW(ads->ctl9) = val;
44879
44880 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
44881 - ACCESS_ONCE(ads->ctl10) = checksum;
44882 + ACCESS_ONCE_RW(ads->ctl10) = checksum;
44883
44884 if (i->is_first || i->is_last) {
44885 - ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
44886 + ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
44887 | set11nTries(i->rates, 1)
44888 | set11nTries(i->rates, 2)
44889 | set11nTries(i->rates, 3)
44890 | (i->dur_update ? AR_DurUpdateEna : 0)
44891 | SM(0, AR_BurstDur);
44892
44893 - ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
44894 + ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
44895 | set11nRate(i->rates, 1)
44896 | set11nRate(i->rates, 2)
44897 | set11nRate(i->rates, 3);
44898 } else {
44899 - ACCESS_ONCE(ads->ctl13) = 0;
44900 - ACCESS_ONCE(ads->ctl14) = 0;
44901 + ACCESS_ONCE_RW(ads->ctl13) = 0;
44902 + ACCESS_ONCE_RW(ads->ctl14) = 0;
44903 }
44904
44905 ads->ctl20 = 0;
44906 @@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44907
44908 ctl17 = SM(i->keytype, AR_EncrType);
44909 if (!i->is_first) {
44910 - ACCESS_ONCE(ads->ctl11) = 0;
44911 - ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
44912 - ACCESS_ONCE(ads->ctl15) = 0;
44913 - ACCESS_ONCE(ads->ctl16) = 0;
44914 - ACCESS_ONCE(ads->ctl17) = ctl17;
44915 - ACCESS_ONCE(ads->ctl18) = 0;
44916 - ACCESS_ONCE(ads->ctl19) = 0;
44917 + ACCESS_ONCE_RW(ads->ctl11) = 0;
44918 + ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
44919 + ACCESS_ONCE_RW(ads->ctl15) = 0;
44920 + ACCESS_ONCE_RW(ads->ctl16) = 0;
44921 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
44922 + ACCESS_ONCE_RW(ads->ctl18) = 0;
44923 + ACCESS_ONCE_RW(ads->ctl19) = 0;
44924 return;
44925 }
44926
44927 - ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
44928 + ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
44929 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
44930 | SM(i->txpower, AR_XmitPower)
44931 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
44932 @@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
44933 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
44934 ctl12 |= SM(val, AR_PAPRDChainMask);
44935
44936 - ACCESS_ONCE(ads->ctl12) = ctl12;
44937 - ACCESS_ONCE(ads->ctl17) = ctl17;
44938 + ACCESS_ONCE_RW(ads->ctl12) = ctl12;
44939 + ACCESS_ONCE_RW(ads->ctl17) = ctl17;
44940
44941 - ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
44942 + ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
44943 | set11nPktDurRTSCTS(i->rates, 1);
44944
44945 - ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
44946 + ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
44947 | set11nPktDurRTSCTS(i->rates, 3);
44948
44949 - ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
44950 + ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
44951 | set11nRateFlags(i->rates, 1)
44952 | set11nRateFlags(i->rates, 2)
44953 | set11nRateFlags(i->rates, 3)
44954 | SM(i->rtscts_rate, AR_RTSCTSRate);
44955
44956 - ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
44957 + ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
44958 }
44959
44960 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
44961 diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
44962 index 69a907b..91e071c 100644
44963 --- a/drivers/net/wireless/ath/ath9k/hw.h
44964 +++ b/drivers/net/wireless/ath/ath9k/hw.h
44965 @@ -657,7 +657,7 @@ struct ath_hw_private_ops {
44966
44967 /* ANI */
44968 void (*ani_cache_ini_regs)(struct ath_hw *ah);
44969 -};
44970 +} __no_const;
44971
44972 /**
44973 * struct ath_spec_scan - parameters for Atheros spectral scan
44974 @@ -729,7 +729,7 @@ struct ath_hw_ops {
44975 #ifdef CONFIG_ATH9K_BTCOEX_SUPPORT
44976 void (*set_bt_ant_diversity)(struct ath_hw *hw, bool enable);
44977 #endif
44978 -};
44979 +} __no_const;
44980
44981 struct ath_nf_limits {
44982 s16 max;
44983 diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
44984 index 92190da..f3a4c4c 100644
44985 --- a/drivers/net/wireless/b43/phy_lp.c
44986 +++ b/drivers/net/wireless/b43/phy_lp.c
44987 @@ -2514,7 +2514,7 @@ static int lpphy_b2063_tune(struct b43_wldev *dev,
44988 {
44989 struct ssb_bus *bus = dev->dev->sdev->bus;
44990
44991 - static const struct b206x_channel *chandata = NULL;
44992 + const struct b206x_channel *chandata = NULL;
44993 u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
44994 u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
44995 u16 old_comm15, scale;
44996 diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
44997 index 9581d07..84f6a76 100644
44998 --- a/drivers/net/wireless/iwlegacy/3945-mac.c
44999 +++ b/drivers/net/wireless/iwlegacy/3945-mac.c
45000 @@ -3639,7 +3639,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
45001 */
45002 if (il3945_mod_params.disable_hw_scan) {
45003 D_INFO("Disabling hw_scan\n");
45004 - il3945_mac_ops.hw_scan = NULL;
45005 + pax_open_kernel();
45006 + *(void **)&il3945_mac_ops.hw_scan = NULL;
45007 + pax_close_kernel();
45008 }
45009
45010 D_INFO("*** LOAD DRIVER ***\n");
45011 diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
45012 index d94f8ab..5b568c8 100644
45013 --- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
45014 +++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
45015 @@ -188,7 +188,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
45016 {
45017 struct iwl_priv *priv = file->private_data;
45018 char buf[64];
45019 - int buf_size;
45020 + size_t buf_size;
45021 u32 offset, len;
45022
45023 memset(buf, 0, sizeof(buf));
45024 @@ -458,7 +458,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
45025 struct iwl_priv *priv = file->private_data;
45026
45027 char buf[8];
45028 - int buf_size;
45029 + size_t buf_size;
45030 u32 reset_flag;
45031
45032 memset(buf, 0, sizeof(buf));
45033 @@ -539,7 +539,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
45034 {
45035 struct iwl_priv *priv = file->private_data;
45036 char buf[8];
45037 - int buf_size;
45038 + size_t buf_size;
45039 int ht40;
45040
45041 memset(buf, 0, sizeof(buf));
45042 @@ -591,7 +591,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
45043 {
45044 struct iwl_priv *priv = file->private_data;
45045 char buf[8];
45046 - int buf_size;
45047 + size_t buf_size;
45048 int value;
45049
45050 memset(buf, 0, sizeof(buf));
45051 @@ -683,10 +683,10 @@ DEBUGFS_READ_FILE_OPS(temperature);
45052 DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
45053 DEBUGFS_READ_FILE_OPS(current_sleep_command);
45054
45055 -static const char *fmt_value = " %-30s %10u\n";
45056 -static const char *fmt_hex = " %-30s 0x%02X\n";
45057 -static const char *fmt_table = " %-30s %10u %10u %10u %10u\n";
45058 -static const char *fmt_header =
45059 +static const char fmt_value[] = " %-30s %10u\n";
45060 +static const char fmt_hex[] = " %-30s 0x%02X\n";
45061 +static const char fmt_table[] = " %-30s %10u %10u %10u %10u\n";
45062 +static const char fmt_header[] =
45063 "%-32s current cumulative delta max\n";
45064
45065 static int iwl_statistics_flag(struct iwl_priv *priv, char *buf, int bufsz)
45066 @@ -1856,7 +1856,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
45067 {
45068 struct iwl_priv *priv = file->private_data;
45069 char buf[8];
45070 - int buf_size;
45071 + size_t buf_size;
45072 int clear;
45073
45074 memset(buf, 0, sizeof(buf));
45075 @@ -1901,7 +1901,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
45076 {
45077 struct iwl_priv *priv = file->private_data;
45078 char buf[8];
45079 - int buf_size;
45080 + size_t buf_size;
45081 int trace;
45082
45083 memset(buf, 0, sizeof(buf));
45084 @@ -1972,7 +1972,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
45085 {
45086 struct iwl_priv *priv = file->private_data;
45087 char buf[8];
45088 - int buf_size;
45089 + size_t buf_size;
45090 int missed;
45091
45092 memset(buf, 0, sizeof(buf));
45093 @@ -2013,7 +2013,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
45094
45095 struct iwl_priv *priv = file->private_data;
45096 char buf[8];
45097 - int buf_size;
45098 + size_t buf_size;
45099 int plcp;
45100
45101 memset(buf, 0, sizeof(buf));
45102 @@ -2073,7 +2073,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
45103
45104 struct iwl_priv *priv = file->private_data;
45105 char buf[8];
45106 - int buf_size;
45107 + size_t buf_size;
45108 int flush;
45109
45110 memset(buf, 0, sizeof(buf));
45111 @@ -2163,7 +2163,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
45112
45113 struct iwl_priv *priv = file->private_data;
45114 char buf[8];
45115 - int buf_size;
45116 + size_t buf_size;
45117 int rts;
45118
45119 if (!priv->cfg->ht_params)
45120 @@ -2205,7 +2205,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
45121 {
45122 struct iwl_priv *priv = file->private_data;
45123 char buf[8];
45124 - int buf_size;
45125 + size_t buf_size;
45126
45127 memset(buf, 0, sizeof(buf));
45128 buf_size = min(count, sizeof(buf) - 1);
45129 @@ -2239,7 +2239,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
45130 struct iwl_priv *priv = file->private_data;
45131 u32 event_log_flag;
45132 char buf[8];
45133 - int buf_size;
45134 + size_t buf_size;
45135
45136 /* check that the interface is up */
45137 if (!iwl_is_ready(priv))
45138 @@ -2293,7 +2293,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
45139 struct iwl_priv *priv = file->private_data;
45140 char buf[8];
45141 u32 calib_disabled;
45142 - int buf_size;
45143 + size_t buf_size;
45144
45145 memset(buf, 0, sizeof(buf));
45146 buf_size = min(count, sizeof(buf) - 1);
45147 diff --git a/drivers/net/wireless/iwlwifi/dvm/main.c b/drivers/net/wireless/iwlwifi/dvm/main.c
45148 index 7aad766..06addb4 100644
45149 --- a/drivers/net/wireless/iwlwifi/dvm/main.c
45150 +++ b/drivers/net/wireless/iwlwifi/dvm/main.c
45151 @@ -1123,7 +1123,7 @@ static void iwl_option_config(struct iwl_priv *priv)
45152 static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
45153 {
45154 struct iwl_nvm_data *data = priv->nvm_data;
45155 - char *debug_msg;
45156 + static const char debug_msg[] = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
45157
45158 if (data->sku_cap_11n_enable &&
45159 !priv->cfg->ht_params) {
45160 @@ -1137,7 +1137,6 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv)
45161 return -EINVAL;
45162 }
45163
45164 - debug_msg = "Device SKU: 24GHz %s %s, 52GHz %s %s, 11.n %s %s\n";
45165 IWL_DEBUG_INFO(priv, debug_msg,
45166 data->sku_cap_band_24GHz_enable ? "" : "NOT", "enabled",
45167 data->sku_cap_band_52GHz_enable ? "" : "NOT", "enabled",
45168 diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
45169 index 6bc3100..dd1b80d 100644
45170 --- a/drivers/net/wireless/iwlwifi/pcie/trans.c
45171 +++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
45172 @@ -1249,7 +1249,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
45173 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
45174
45175 char buf[8];
45176 - int buf_size;
45177 + size_t buf_size;
45178 u32 reset_flag;
45179
45180 memset(buf, 0, sizeof(buf));
45181 @@ -1270,7 +1270,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
45182 {
45183 struct iwl_trans *trans = file->private_data;
45184 char buf[8];
45185 - int buf_size;
45186 + size_t buf_size;
45187 int csr;
45188
45189 memset(buf, 0, sizeof(buf));
45190 diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
45191 index 2cd3f54..e936f90 100644
45192 --- a/drivers/net/wireless/mac80211_hwsim.c
45193 +++ b/drivers/net/wireless/mac80211_hwsim.c
45194 @@ -2196,25 +2196,19 @@ static int __init init_mac80211_hwsim(void)
45195
45196 if (channels > 1) {
45197 hwsim_if_comb.num_different_channels = channels;
45198 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
45199 - mac80211_hwsim_ops.cancel_hw_scan =
45200 - mac80211_hwsim_cancel_hw_scan;
45201 - mac80211_hwsim_ops.sw_scan_start = NULL;
45202 - mac80211_hwsim_ops.sw_scan_complete = NULL;
45203 - mac80211_hwsim_ops.remain_on_channel =
45204 - mac80211_hwsim_roc;
45205 - mac80211_hwsim_ops.cancel_remain_on_channel =
45206 - mac80211_hwsim_croc;
45207 - mac80211_hwsim_ops.add_chanctx =
45208 - mac80211_hwsim_add_chanctx;
45209 - mac80211_hwsim_ops.remove_chanctx =
45210 - mac80211_hwsim_remove_chanctx;
45211 - mac80211_hwsim_ops.change_chanctx =
45212 - mac80211_hwsim_change_chanctx;
45213 - mac80211_hwsim_ops.assign_vif_chanctx =
45214 - mac80211_hwsim_assign_vif_chanctx;
45215 - mac80211_hwsim_ops.unassign_vif_chanctx =
45216 - mac80211_hwsim_unassign_vif_chanctx;
45217 + pax_open_kernel();
45218 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
45219 + *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
45220 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
45221 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
45222 + *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
45223 + *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
45224 + *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
45225 + *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
45226 + *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
45227 + *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
45228 + *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
45229 + pax_close_kernel();
45230 }
45231
45232 spin_lock_init(&hwsim_radio_lock);
45233 diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
45234 index 8169a85..7fa3b47 100644
45235 --- a/drivers/net/wireless/rndis_wlan.c
45236 +++ b/drivers/net/wireless/rndis_wlan.c
45237 @@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
45238
45239 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
45240
45241 - if (rts_threshold < 0 || rts_threshold > 2347)
45242 + if (rts_threshold > 2347)
45243 rts_threshold = 2347;
45244
45245 tmp = cpu_to_le32(rts_threshold);
45246 diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
45247 index fe4c572..99dedfa 100644
45248 --- a/drivers/net/wireless/rt2x00/rt2x00.h
45249 +++ b/drivers/net/wireless/rt2x00/rt2x00.h
45250 @@ -387,7 +387,7 @@ struct rt2x00_intf {
45251 * for hardware which doesn't support hardware
45252 * sequence counting.
45253 */
45254 - atomic_t seqno;
45255 + atomic_unchecked_t seqno;
45256 };
45257
45258 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
45259 diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
45260 index 66a2db8..70cad04 100644
45261 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c
45262 +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
45263 @@ -252,9 +252,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
45264 * sequence counter given by mac80211.
45265 */
45266 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
45267 - seqno = atomic_add_return(0x10, &intf->seqno);
45268 + seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
45269 else
45270 - seqno = atomic_read(&intf->seqno);
45271 + seqno = atomic_read_unchecked(&intf->seqno);
45272
45273 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
45274 hdr->seq_ctrl |= cpu_to_le16(seqno);
45275 diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
45276 index e2b3d9c..67a5184 100644
45277 --- a/drivers/net/wireless/ti/wl1251/sdio.c
45278 +++ b/drivers/net/wireless/ti/wl1251/sdio.c
45279 @@ -271,13 +271,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
45280
45281 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
45282
45283 - wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
45284 - wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
45285 + pax_open_kernel();
45286 + *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
45287 + *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
45288 + pax_close_kernel();
45289
45290 wl1251_info("using dedicated interrupt line");
45291 } else {
45292 - wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
45293 - wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
45294 + pax_open_kernel();
45295 + *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
45296 + *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
45297 + pax_close_kernel();
45298
45299 wl1251_info("using SDIO interrupt");
45300 }
45301 diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
45302 index 1c627da..69f7d17 100644
45303 --- a/drivers/net/wireless/ti/wl12xx/main.c
45304 +++ b/drivers/net/wireless/ti/wl12xx/main.c
45305 @@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
45306 sizeof(wl->conf.mem));
45307
45308 /* read data preparation is only needed by wl127x */
45309 - wl->ops->prepare_read = wl127x_prepare_read;
45310 + pax_open_kernel();
45311 + *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
45312 + pax_close_kernel();
45313
45314 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
45315 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
45316 @@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
45317 sizeof(wl->conf.mem));
45318
45319 /* read data preparation is only needed by wl127x */
45320 - wl->ops->prepare_read = wl127x_prepare_read;
45321 + pax_open_kernel();
45322 + *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
45323 + pax_close_kernel();
45324
45325 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
45326 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
45327 diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
45328 index 7aa0eb8..5a9ef38 100644
45329 --- a/drivers/net/wireless/ti/wl18xx/main.c
45330 +++ b/drivers/net/wireless/ti/wl18xx/main.c
45331 @@ -1730,8 +1730,10 @@ static int wl18xx_setup(struct wl1271 *wl)
45332 }
45333
45334 if (!checksum_param) {
45335 - wl18xx_ops.set_rx_csum = NULL;
45336 - wl18xx_ops.init_vif = NULL;
45337 + pax_open_kernel();
45338 + *(void **)&wl18xx_ops.set_rx_csum = NULL;
45339 + *(void **)&wl18xx_ops.init_vif = NULL;
45340 + pax_close_kernel();
45341 }
45342
45343 /* Enable 11a Band only if we have 5G antennas */
45344 diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
45345 index 7ef0b4a..ff65c28 100644
45346 --- a/drivers/net/wireless/zd1211rw/zd_usb.c
45347 +++ b/drivers/net/wireless/zd1211rw/zd_usb.c
45348 @@ -386,7 +386,7 @@ static inline void handle_regs_int(struct urb *urb)
45349 {
45350 struct zd_usb *usb = urb->context;
45351 struct zd_usb_interrupt *intr = &usb->intr;
45352 - int len;
45353 + unsigned int len;
45354 u16 int_num;
45355
45356 ZD_ASSERT(in_interrupt());
45357 diff --git a/drivers/nfc/nfcwilink.c b/drivers/nfc/nfcwilink.c
45358 index 59f95d8..53e0e7f 100644
45359 --- a/drivers/nfc/nfcwilink.c
45360 +++ b/drivers/nfc/nfcwilink.c
45361 @@ -513,7 +513,7 @@ static struct nci_ops nfcwilink_ops = {
45362
45363 static int nfcwilink_probe(struct platform_device *pdev)
45364 {
45365 - static struct nfcwilink *drv;
45366 + struct nfcwilink *drv;
45367 int rc;
45368 __u32 protocols;
45369
45370 diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
45371 index d93b2b6..ae50401 100644
45372 --- a/drivers/oprofile/buffer_sync.c
45373 +++ b/drivers/oprofile/buffer_sync.c
45374 @@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
45375 if (cookie == NO_COOKIE)
45376 offset = pc;
45377 if (cookie == INVALID_COOKIE) {
45378 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
45379 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
45380 offset = pc;
45381 }
45382 if (cookie != last_cookie) {
45383 @@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
45384 /* add userspace sample */
45385
45386 if (!mm) {
45387 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
45388 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
45389 return 0;
45390 }
45391
45392 cookie = lookup_dcookie(mm, s->eip, &offset);
45393
45394 if (cookie == INVALID_COOKIE) {
45395 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
45396 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
45397 return 0;
45398 }
45399
45400 @@ -552,7 +552,7 @@ void sync_buffer(int cpu)
45401 /* ignore backtraces if failed to add a sample */
45402 if (state == sb_bt_start) {
45403 state = sb_bt_ignore;
45404 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
45405 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
45406 }
45407 }
45408 release_mm(mm);
45409 diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
45410 index c0cc4e7..44d4e54 100644
45411 --- a/drivers/oprofile/event_buffer.c
45412 +++ b/drivers/oprofile/event_buffer.c
45413 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
45414 }
45415
45416 if (buffer_pos == buffer_size) {
45417 - atomic_inc(&oprofile_stats.event_lost_overflow);
45418 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
45419 return;
45420 }
45421
45422 diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
45423 index ed2c3ec..deda85a 100644
45424 --- a/drivers/oprofile/oprof.c
45425 +++ b/drivers/oprofile/oprof.c
45426 @@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
45427 if (oprofile_ops.switch_events())
45428 return;
45429
45430 - atomic_inc(&oprofile_stats.multiplex_counter);
45431 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
45432 start_switch_worker();
45433 }
45434
45435 diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
45436 index ee2cfce..7f8f699 100644
45437 --- a/drivers/oprofile/oprofile_files.c
45438 +++ b/drivers/oprofile/oprofile_files.c
45439 @@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
45440
45441 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
45442
45443 -static ssize_t timeout_read(struct file *file, char __user *buf,
45444 +static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
45445 size_t count, loff_t *offset)
45446 {
45447 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
45448 diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
45449 index 59659ce..6c860a0 100644
45450 --- a/drivers/oprofile/oprofile_stats.c
45451 +++ b/drivers/oprofile/oprofile_stats.c
45452 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
45453 cpu_buf->sample_invalid_eip = 0;
45454 }
45455
45456 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
45457 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
45458 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
45459 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
45460 - atomic_set(&oprofile_stats.multiplex_counter, 0);
45461 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
45462 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
45463 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
45464 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
45465 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
45466 }
45467
45468
45469 diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
45470 index 1fc622b..8c48fc3 100644
45471 --- a/drivers/oprofile/oprofile_stats.h
45472 +++ b/drivers/oprofile/oprofile_stats.h
45473 @@ -13,11 +13,11 @@
45474 #include <linux/atomic.h>
45475
45476 struct oprofile_stat_struct {
45477 - atomic_t sample_lost_no_mm;
45478 - atomic_t sample_lost_no_mapping;
45479 - atomic_t bt_lost_no_mapping;
45480 - atomic_t event_lost_overflow;
45481 - atomic_t multiplex_counter;
45482 + atomic_unchecked_t sample_lost_no_mm;
45483 + atomic_unchecked_t sample_lost_no_mapping;
45484 + atomic_unchecked_t bt_lost_no_mapping;
45485 + atomic_unchecked_t event_lost_overflow;
45486 + atomic_unchecked_t multiplex_counter;
45487 };
45488
45489 extern struct oprofile_stat_struct oprofile_stats;
45490 diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
45491 index 3f49345..c750d0b 100644
45492 --- a/drivers/oprofile/oprofilefs.c
45493 +++ b/drivers/oprofile/oprofilefs.c
45494 @@ -176,8 +176,8 @@ int oprofilefs_create_ro_ulong(struct dentry *root,
45495
45496 static ssize_t atomic_read_file(struct file *file, char __user *buf, size_t count, loff_t *offset)
45497 {
45498 - atomic_t *val = file->private_data;
45499 - return oprofilefs_ulong_to_user(atomic_read(val), buf, count, offset);
45500 + atomic_unchecked_t *val = file->private_data;
45501 + return oprofilefs_ulong_to_user(atomic_read_unchecked(val), buf, count, offset);
45502 }
45503
45504
45505 @@ -189,7 +189,7 @@ static const struct file_operations atomic_ro_fops = {
45506
45507
45508 int oprofilefs_create_ro_atomic(struct dentry *root,
45509 - char const *name, atomic_t *val)
45510 + char const *name, atomic_unchecked_t *val)
45511 {
45512 return __oprofilefs_create_file(root, name,
45513 &atomic_ro_fops, 0444, val);
45514 diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
45515 index 61be1d9..dec05d7 100644
45516 --- a/drivers/oprofile/timer_int.c
45517 +++ b/drivers/oprofile/timer_int.c
45518 @@ -93,7 +93,7 @@ static int oprofile_cpu_notify(struct notifier_block *self,
45519 return NOTIFY_OK;
45520 }
45521
45522 -static struct notifier_block __refdata oprofile_cpu_notifier = {
45523 +static struct notifier_block oprofile_cpu_notifier = {
45524 .notifier_call = oprofile_cpu_notify,
45525 };
45526
45527 diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
45528 index 92ed045..62d39bd7 100644
45529 --- a/drivers/parport/procfs.c
45530 +++ b/drivers/parport/procfs.c
45531 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
45532
45533 *ppos += len;
45534
45535 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
45536 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
45537 }
45538
45539 #ifdef CONFIG_PARPORT_1284
45540 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
45541
45542 *ppos += len;
45543
45544 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
45545 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
45546 }
45547 #endif /* IEEE1284.3 support. */
45548
45549 diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
45550 index 2f5786c..61ab4d1 100644
45551 --- a/drivers/pci/hotplug/acpiphp_ibm.c
45552 +++ b/drivers/pci/hotplug/acpiphp_ibm.c
45553 @@ -463,7 +463,9 @@ static int __init ibm_acpiphp_init(void)
45554 goto init_cleanup;
45555 }
45556
45557 - ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
45558 + pax_open_kernel();
45559 + *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
45560 + pax_close_kernel();
45561 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
45562
45563 return retval;
45564 diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
45565 index a6a71c4..c91097b 100644
45566 --- a/drivers/pci/hotplug/cpcihp_generic.c
45567 +++ b/drivers/pci/hotplug/cpcihp_generic.c
45568 @@ -73,7 +73,6 @@ static u16 port;
45569 static unsigned int enum_bit;
45570 static u8 enum_mask;
45571
45572 -static struct cpci_hp_controller_ops generic_hpc_ops;
45573 static struct cpci_hp_controller generic_hpc;
45574
45575 static int __init validate_parameters(void)
45576 @@ -139,6 +138,10 @@ static int query_enum(void)
45577 return ((value & enum_mask) == enum_mask);
45578 }
45579
45580 +static struct cpci_hp_controller_ops generic_hpc_ops = {
45581 + .query_enum = query_enum,
45582 +};
45583 +
45584 static int __init cpcihp_generic_init(void)
45585 {
45586 int status;
45587 @@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
45588 pci_dev_put(dev);
45589
45590 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
45591 - generic_hpc_ops.query_enum = query_enum;
45592 generic_hpc.ops = &generic_hpc_ops;
45593
45594 status = cpci_hp_register_controller(&generic_hpc);
45595 diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
45596 index 449b4bb..257e2e8 100644
45597 --- a/drivers/pci/hotplug/cpcihp_zt5550.c
45598 +++ b/drivers/pci/hotplug/cpcihp_zt5550.c
45599 @@ -59,7 +59,6 @@
45600 /* local variables */
45601 static bool debug;
45602 static bool poll;
45603 -static struct cpci_hp_controller_ops zt5550_hpc_ops;
45604 static struct cpci_hp_controller zt5550_hpc;
45605
45606 /* Primary cPCI bus bridge device */
45607 @@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
45608 return 0;
45609 }
45610
45611 +static struct cpci_hp_controller_ops zt5550_hpc_ops = {
45612 + .query_enum = zt5550_hc_query_enum,
45613 +};
45614 +
45615 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
45616 {
45617 int status;
45618 @@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
45619 dbg("returned from zt5550_hc_config");
45620
45621 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
45622 - zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
45623 zt5550_hpc.ops = &zt5550_hpc_ops;
45624 if(!poll) {
45625 zt5550_hpc.irq = hc_dev->irq;
45626 zt5550_hpc.irq_flags = IRQF_SHARED;
45627 zt5550_hpc.dev_id = hc_dev;
45628
45629 - zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
45630 - zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
45631 - zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
45632 + pax_open_kernel();
45633 + *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
45634 + *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
45635 + *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
45636 + pax_close_kernel();
45637 } else {
45638 info("using ENUM# polling mode");
45639 }
45640 diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
45641 index 76ba8a1..20ca857 100644
45642 --- a/drivers/pci/hotplug/cpqphp_nvram.c
45643 +++ b/drivers/pci/hotplug/cpqphp_nvram.c
45644 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
45645
45646 void compaq_nvram_init (void __iomem *rom_start)
45647 {
45648 +
45649 +#ifndef CONFIG_PAX_KERNEXEC
45650 if (rom_start) {
45651 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
45652 }
45653 +#endif
45654 +
45655 dbg("int15 entry = %p\n", compaq_int15_entry_point);
45656
45657 /* initialize our int15 lock */
45658 diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
45659 index ec20f74..c1d961e 100644
45660 --- a/drivers/pci/hotplug/pci_hotplug_core.c
45661 +++ b/drivers/pci/hotplug/pci_hotplug_core.c
45662 @@ -441,8 +441,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
45663 return -EINVAL;
45664 }
45665
45666 - slot->ops->owner = owner;
45667 - slot->ops->mod_name = mod_name;
45668 + pax_open_kernel();
45669 + *(struct module **)&slot->ops->owner = owner;
45670 + *(const char **)&slot->ops->mod_name = mod_name;
45671 + pax_close_kernel();
45672
45673 mutex_lock(&pci_hp_mutex);
45674 /*
45675 diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
45676 index f4a18f5..ff2463c 100644
45677 --- a/drivers/pci/hotplug/pciehp_core.c
45678 +++ b/drivers/pci/hotplug/pciehp_core.c
45679 @@ -92,7 +92,7 @@ static int init_slot(struct controller *ctrl)
45680 struct slot *slot = ctrl->slot;
45681 struct hotplug_slot *hotplug = NULL;
45682 struct hotplug_slot_info *info = NULL;
45683 - struct hotplug_slot_ops *ops = NULL;
45684 + hotplug_slot_ops_no_const *ops = NULL;
45685 char name[SLOT_NAME_SIZE];
45686 int retval = -ENOMEM;
45687
45688 diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
45689 index 7128cfd..db7c65b 100644
45690 --- a/drivers/pci/pci-sysfs.c
45691 +++ b/drivers/pci/pci-sysfs.c
45692 @@ -634,6 +634,10 @@ pci_write_config(struct file* filp, struct kobject *kobj,
45693 loff_t init_off = off;
45694 u8 *data = (u8*) buf;
45695
45696 +#ifdef CONFIG_GRKERNSEC_KMEM
45697 + return -EPERM;
45698 +#endif
45699 +
45700 if (off > dev->cfg_size)
45701 return 0;
45702 if (off + count > dev->cfg_size) {
45703 @@ -940,6 +944,10 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
45704 resource_size_t start, end;
45705 int i;
45706
45707 +#ifdef CONFIG_GRKERNSEC_KMEM
45708 + return -EPERM;
45709 +#endif
45710 +
45711 for (i = 0; i < PCI_ROM_RESOURCE; i++)
45712 if (res == &pdev->resource[i])
45713 break;
45714 @@ -1047,6 +1055,10 @@ pci_write_resource_io(struct file *filp, struct kobject *kobj,
45715 struct bin_attribute *attr, char *buf,
45716 loff_t off, size_t count)
45717 {
45718 +#ifdef CONFIG_GRKERNSEC_KMEM
45719 + return -EPERM;
45720 +#endif
45721 +
45722 return pci_resource_io(filp, kobj, attr, buf, off, count, true);
45723 }
45724
45725 @@ -1083,7 +1095,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
45726 {
45727 /* allocate attribute structure, piggyback attribute name */
45728 int name_len = write_combine ? 13 : 10;
45729 - struct bin_attribute *res_attr;
45730 + bin_attribute_no_const *res_attr;
45731 int retval;
45732
45733 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
45734 @@ -1268,7 +1280,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
45735 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
45736 {
45737 int retval;
45738 - struct bin_attribute *attr;
45739 + bin_attribute_no_const *attr;
45740
45741 /* If the device has VPD, try to expose it in sysfs. */
45742 if (dev->vpd) {
45743 @@ -1315,7 +1327,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
45744 {
45745 int retval;
45746 int rom_size = 0;
45747 - struct bin_attribute *attr;
45748 + bin_attribute_no_const *attr;
45749
45750 if (!sysfs_initialized)
45751 return -EACCES;
45752 diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
45753 index 8a00c06..18a9715 100644
45754 --- a/drivers/pci/pci.h
45755 +++ b/drivers/pci/pci.h
45756 @@ -95,7 +95,7 @@ struct pci_vpd_ops {
45757 struct pci_vpd {
45758 unsigned int len;
45759 const struct pci_vpd_ops *ops;
45760 - struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
45761 + bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
45762 };
45763
45764 int pci_vpd_pci22_init(struct pci_dev *dev);
45765 diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
45766 index 403a443..034e050 100644
45767 --- a/drivers/pci/pcie/aspm.c
45768 +++ b/drivers/pci/pcie/aspm.c
45769 @@ -27,9 +27,9 @@
45770 #define MODULE_PARAM_PREFIX "pcie_aspm."
45771
45772 /* Note: those are not register definitions */
45773 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
45774 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
45775 -#define ASPM_STATE_L1 (4) /* L1 state */
45776 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
45777 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
45778 +#define ASPM_STATE_L1 (4U) /* L1 state */
45779 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
45780 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
45781
45782 diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
45783 index 7ef0f86..17b710f 100644
45784 --- a/drivers/pci/probe.c
45785 +++ b/drivers/pci/probe.c
45786 @@ -175,7 +175,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
45787 struct pci_bus_region region, inverted_region;
45788 bool bar_too_big = false, bar_disabled = false;
45789
45790 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
45791 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
45792
45793 /* No printks while decoding is disabled! */
45794 if (!dev->mmio_always_on) {
45795 diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
45796 index cdc7836..2e0eb94 100644
45797 --- a/drivers/pci/proc.c
45798 +++ b/drivers/pci/proc.c
45799 @@ -117,6 +117,10 @@ proc_bus_pci_write(struct file *file, const char __user *buf, size_t nbytes, lof
45800 int size = dev->cfg_size;
45801 int cnt;
45802
45803 +#ifdef CONFIG_GRKERNSEC_KMEM
45804 + return -EPERM;
45805 +#endif
45806 +
45807 if (pos >= size)
45808 return 0;
45809 if (nbytes >= size)
45810 @@ -196,6 +200,10 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd,
45811 #endif /* HAVE_PCI_MMAP */
45812 int ret = 0;
45813
45814 +#ifdef CONFIG_GRKERNSEC_KMEM
45815 + return -EPERM;
45816 +#endif
45817 +
45818 switch (cmd) {
45819 case PCIIOC_CONTROLLER:
45820 ret = pci_domain_nr(dev->bus);
45821 @@ -234,6 +242,10 @@ static int proc_bus_pci_mmap(struct file *file, struct vm_area_struct *vma)
45822 struct pci_filp_private *fpriv = file->private_data;
45823 int i, ret;
45824
45825 +#ifdef CONFIG_GRKERNSEC_KMEM
45826 + return -EPERM;
45827 +#endif
45828 +
45829 if (!capable(CAP_SYS_RAWIO))
45830 return -EPERM;
45831
45832 @@ -434,7 +446,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
45833 static int __init pci_proc_init(void)
45834 {
45835 struct pci_dev *dev = NULL;
45836 +
45837 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
45838 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45839 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
45840 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45841 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
45842 +#endif
45843 +#else
45844 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
45845 +#endif
45846 proc_create("devices", 0, proc_bus_pci_dir,
45847 &proc_bus_pci_dev_operations);
45848 proc_initialized = 1;
45849 diff --git a/drivers/pci/syscall.c b/drivers/pci/syscall.c
45850 index e1c1ec5..bef4210 100644
45851 --- a/drivers/pci/syscall.c
45852 +++ b/drivers/pci/syscall.c
45853 @@ -92,6 +92,10 @@ SYSCALL_DEFINE5(pciconfig_write, unsigned long, bus, unsigned long, dfn,
45854 u32 dword;
45855 int err = 0;
45856
45857 +#ifdef CONFIG_GRKERNSEC_KMEM
45858 + return -EPERM;
45859 +#endif
45860 +
45861 if (!capable(CAP_SYS_ADMIN))
45862 return -EPERM;
45863
45864 diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
45865 index 19c313b..ed28b38 100644
45866 --- a/drivers/platform/x86/asus-wmi.c
45867 +++ b/drivers/platform/x86/asus-wmi.c
45868 @@ -1618,6 +1618,10 @@ static int show_dsts(struct seq_file *m, void *data)
45869 int err;
45870 u32 retval = -1;
45871
45872 +#ifdef CONFIG_GRKERNSEC_KMEM
45873 + return -EPERM;
45874 +#endif
45875 +
45876 err = asus_wmi_get_devstate(asus, asus->debug.dev_id, &retval);
45877
45878 if (err < 0)
45879 @@ -1634,6 +1638,10 @@ static int show_devs(struct seq_file *m, void *data)
45880 int err;
45881 u32 retval = -1;
45882
45883 +#ifdef CONFIG_GRKERNSEC_KMEM
45884 + return -EPERM;
45885 +#endif
45886 +
45887 err = asus_wmi_set_devstate(asus->debug.dev_id, asus->debug.ctrl_param,
45888 &retval);
45889
45890 @@ -1658,6 +1666,10 @@ static int show_call(struct seq_file *m, void *data)
45891 union acpi_object *obj;
45892 acpi_status status;
45893
45894 +#ifdef CONFIG_GRKERNSEC_KMEM
45895 + return -EPERM;
45896 +#endif
45897 +
45898 status = wmi_evaluate_method(ASUS_WMI_MGMT_GUID,
45899 1, asus->debug.method_id,
45900 &input, &output);
45901 diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/x86/chromeos_laptop.c
45902 index 3e5b4497..dcdfb70 100644
45903 --- a/drivers/platform/x86/chromeos_laptop.c
45904 +++ b/drivers/platform/x86/chromeos_laptop.c
45905 @@ -301,7 +301,7 @@ static int __init setup_tsl2563_als(const struct dmi_system_id *id)
45906 return 0;
45907 }
45908
45909 -static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
45910 +static struct dmi_system_id __initconst chromeos_laptop_dmi_table[] = {
45911 {
45912 .ident = "Samsung Series 5 550 - Touchpad",
45913 .matches = {
45914 diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
45915 index 62f8030..c7f2a45 100644
45916 --- a/drivers/platform/x86/msi-laptop.c
45917 +++ b/drivers/platform/x86/msi-laptop.c
45918 @@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
45919
45920 if (!quirks->ec_read_only) {
45921 /* allow userland write sysfs file */
45922 - dev_attr_bluetooth.store = store_bluetooth;
45923 - dev_attr_wlan.store = store_wlan;
45924 - dev_attr_threeg.store = store_threeg;
45925 - dev_attr_bluetooth.attr.mode |= S_IWUSR;
45926 - dev_attr_wlan.attr.mode |= S_IWUSR;
45927 - dev_attr_threeg.attr.mode |= S_IWUSR;
45928 + pax_open_kernel();
45929 + *(void **)&dev_attr_bluetooth.store = store_bluetooth;
45930 + *(void **)&dev_attr_wlan.store = store_wlan;
45931 + *(void **)&dev_attr_threeg.store = store_threeg;
45932 + *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
45933 + *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
45934 + *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
45935 + pax_close_kernel();
45936 }
45937
45938 /* disable hardware control by fn key */
45939 diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
45940 index 70222f2..8c8ce66 100644
45941 --- a/drivers/platform/x86/msi-wmi.c
45942 +++ b/drivers/platform/x86/msi-wmi.c
45943 @@ -183,7 +183,7 @@ static const struct backlight_ops msi_backlight_ops = {
45944 static void msi_wmi_notify(u32 value, void *context)
45945 {
45946 struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
45947 - static struct key_entry *key;
45948 + struct key_entry *key;
45949 union acpi_object *obj;
45950 acpi_status status;
45951
45952 diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
45953 index 3484dd2..13ee730 100644
45954 --- a/drivers/platform/x86/sony-laptop.c
45955 +++ b/drivers/platform/x86/sony-laptop.c
45956 @@ -2448,7 +2448,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
45957 }
45958
45959 /* High speed charging function */
45960 -static struct device_attribute *hsc_handle;
45961 +static device_attribute_no_const *hsc_handle;
45962
45963 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
45964 struct device_attribute *attr,
45965 diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
45966 index 4e86e97..04d50d1 100644
45967 --- a/drivers/platform/x86/thinkpad_acpi.c
45968 +++ b/drivers/platform/x86/thinkpad_acpi.c
45969 @@ -2091,7 +2091,7 @@ static int hotkey_mask_get(void)
45970 return 0;
45971 }
45972
45973 -void static hotkey_mask_warn_incomplete_mask(void)
45974 +static void hotkey_mask_warn_incomplete_mask(void)
45975 {
45976 /* log only what the user can fix... */
45977 const u32 wantedmask = hotkey_driver_mask &
45978 @@ -2318,11 +2318,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
45979 }
45980 }
45981
45982 -static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45983 - struct tp_nvram_state *newn,
45984 - const u32 event_mask)
45985 -{
45986 -
45987 #define TPACPI_COMPARE_KEY(__scancode, __member) \
45988 do { \
45989 if ((event_mask & (1 << __scancode)) && \
45990 @@ -2336,36 +2331,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
45991 tpacpi_hotkey_send_key(__scancode); \
45992 } while (0)
45993
45994 - void issue_volchange(const unsigned int oldvol,
45995 - const unsigned int newvol)
45996 - {
45997 - unsigned int i = oldvol;
45998 +static void issue_volchange(const unsigned int oldvol,
45999 + const unsigned int newvol,
46000 + const u32 event_mask)
46001 +{
46002 + unsigned int i = oldvol;
46003
46004 - while (i > newvol) {
46005 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
46006 - i--;
46007 - }
46008 - while (i < newvol) {
46009 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
46010 - i++;
46011 - }
46012 + while (i > newvol) {
46013 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
46014 + i--;
46015 }
46016 + while (i < newvol) {
46017 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
46018 + i++;
46019 + }
46020 +}
46021
46022 - void issue_brightnesschange(const unsigned int oldbrt,
46023 - const unsigned int newbrt)
46024 - {
46025 - unsigned int i = oldbrt;
46026 +static void issue_brightnesschange(const unsigned int oldbrt,
46027 + const unsigned int newbrt,
46028 + const u32 event_mask)
46029 +{
46030 + unsigned int i = oldbrt;
46031
46032 - while (i > newbrt) {
46033 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
46034 - i--;
46035 - }
46036 - while (i < newbrt) {
46037 - TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
46038 - i++;
46039 - }
46040 + while (i > newbrt) {
46041 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
46042 + i--;
46043 + }
46044 + while (i < newbrt) {
46045 + TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
46046 + i++;
46047 }
46048 +}
46049
46050 +static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
46051 + struct tp_nvram_state *newn,
46052 + const u32 event_mask)
46053 +{
46054 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
46055 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
46056 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
46057 @@ -2399,7 +2400,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
46058 oldn->volume_level != newn->volume_level) {
46059 /* recently muted, or repeated mute keypress, or
46060 * multiple presses ending in mute */
46061 - issue_volchange(oldn->volume_level, newn->volume_level);
46062 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
46063 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
46064 }
46065 } else {
46066 @@ -2409,7 +2410,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
46067 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
46068 }
46069 if (oldn->volume_level != newn->volume_level) {
46070 - issue_volchange(oldn->volume_level, newn->volume_level);
46071 + issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
46072 } else if (oldn->volume_toggle != newn->volume_toggle) {
46073 /* repeated vol up/down keypress at end of scale ? */
46074 if (newn->volume_level == 0)
46075 @@ -2422,7 +2423,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
46076 /* handle brightness */
46077 if (oldn->brightness_level != newn->brightness_level) {
46078 issue_brightnesschange(oldn->brightness_level,
46079 - newn->brightness_level);
46080 + newn->brightness_level,
46081 + event_mask);
46082 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
46083 /* repeated key presses that didn't change state */
46084 if (newn->brightness_level == 0)
46085 @@ -2431,10 +2433,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
46086 && !tp_features.bright_unkfw)
46087 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
46088 }
46089 +}
46090
46091 #undef TPACPI_COMPARE_KEY
46092 #undef TPACPI_MAY_SEND_KEY
46093 -}
46094
46095 /*
46096 * Polling driver
46097 diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
46098 index 769d265..a3a05ca 100644
46099 --- a/drivers/pnp/pnpbios/bioscalls.c
46100 +++ b/drivers/pnp/pnpbios/bioscalls.c
46101 @@ -58,7 +58,7 @@ do { \
46102 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
46103 } while(0)
46104
46105 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
46106 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
46107 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
46108
46109 /*
46110 @@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
46111
46112 cpu = get_cpu();
46113 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
46114 +
46115 + pax_open_kernel();
46116 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
46117 + pax_close_kernel();
46118
46119 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
46120 spin_lock_irqsave(&pnp_bios_lock, flags);
46121 @@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
46122 :"memory");
46123 spin_unlock_irqrestore(&pnp_bios_lock, flags);
46124
46125 + pax_open_kernel();
46126 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
46127 + pax_close_kernel();
46128 +
46129 put_cpu();
46130
46131 /* If we get here and this is set then the PnP BIOS faulted on us. */
46132 @@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
46133 return status;
46134 }
46135
46136 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
46137 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
46138 {
46139 int i;
46140
46141 @@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
46142 pnp_bios_callpoint.offset = header->fields.pm16offset;
46143 pnp_bios_callpoint.segment = PNP_CS16;
46144
46145 + pax_open_kernel();
46146 +
46147 for_each_possible_cpu(i) {
46148 struct desc_struct *gdt = get_cpu_gdt_table(i);
46149 if (!gdt)
46150 @@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
46151 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
46152 (unsigned long)__va(header->fields.pm16dseg));
46153 }
46154 +
46155 + pax_close_kernel();
46156 }
46157 diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
46158 index d95e101..67f0c3f 100644
46159 --- a/drivers/pnp/resource.c
46160 +++ b/drivers/pnp/resource.c
46161 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
46162 return 1;
46163
46164 /* check if the resource is valid */
46165 - if (*irq < 0 || *irq > 15)
46166 + if (*irq > 15)
46167 return 0;
46168
46169 /* check if the resource is reserved */
46170 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
46171 return 1;
46172
46173 /* check if the resource is valid */
46174 - if (*dma < 0 || *dma == 4 || *dma > 7)
46175 + if (*dma == 4 || *dma > 7)
46176 return 0;
46177
46178 /* check if the resource is reserved */
46179 diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
46180 index 0c52e2a..3421ab7 100644
46181 --- a/drivers/power/pda_power.c
46182 +++ b/drivers/power/pda_power.c
46183 @@ -37,7 +37,11 @@ static int polling;
46184
46185 #if IS_ENABLED(CONFIG_USB_PHY)
46186 static struct usb_phy *transceiver;
46187 -static struct notifier_block otg_nb;
46188 +static int otg_handle_notification(struct notifier_block *nb,
46189 + unsigned long event, void *unused);
46190 +static struct notifier_block otg_nb = {
46191 + .notifier_call = otg_handle_notification
46192 +};
46193 #endif
46194
46195 static struct regulator *ac_draw;
46196 @@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
46197
46198 #if IS_ENABLED(CONFIG_USB_PHY)
46199 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
46200 - otg_nb.notifier_call = otg_handle_notification;
46201 ret = usb_register_notifier(transceiver, &otg_nb);
46202 if (ret) {
46203 dev_err(dev, "failure to register otg notifier\n");
46204 diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
46205 index cc439fd..8fa30df 100644
46206 --- a/drivers/power/power_supply.h
46207 +++ b/drivers/power/power_supply.h
46208 @@ -16,12 +16,12 @@ struct power_supply;
46209
46210 #ifdef CONFIG_SYSFS
46211
46212 -extern void power_supply_init_attrs(struct device_type *dev_type);
46213 +extern void power_supply_init_attrs(void);
46214 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
46215
46216 #else
46217
46218 -static inline void power_supply_init_attrs(struct device_type *dev_type) {}
46219 +static inline void power_supply_init_attrs(void) {}
46220 #define power_supply_uevent NULL
46221
46222 #endif /* CONFIG_SYSFS */
46223 diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
46224 index 00e6672..2642c08 100644
46225 --- a/drivers/power/power_supply_core.c
46226 +++ b/drivers/power/power_supply_core.c
46227 @@ -24,7 +24,10 @@
46228 struct class *power_supply_class;
46229 EXPORT_SYMBOL_GPL(power_supply_class);
46230
46231 -static struct device_type power_supply_dev_type;
46232 +extern const struct attribute_group *power_supply_attr_groups[];
46233 +static struct device_type power_supply_dev_type = {
46234 + .groups = power_supply_attr_groups,
46235 +};
46236
46237 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
46238 struct power_supply *supply)
46239 @@ -584,7 +587,7 @@ static int __init power_supply_class_init(void)
46240 return PTR_ERR(power_supply_class);
46241
46242 power_supply_class->dev_uevent = power_supply_uevent;
46243 - power_supply_init_attrs(&power_supply_dev_type);
46244 + power_supply_init_attrs();
46245
46246 return 0;
46247 }
46248 diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
46249 index 44420d1..967126e 100644
46250 --- a/drivers/power/power_supply_sysfs.c
46251 +++ b/drivers/power/power_supply_sysfs.c
46252 @@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
46253 .is_visible = power_supply_attr_is_visible,
46254 };
46255
46256 -static const struct attribute_group *power_supply_attr_groups[] = {
46257 +const struct attribute_group *power_supply_attr_groups[] = {
46258 &power_supply_attr_group,
46259 NULL,
46260 };
46261
46262 -void power_supply_init_attrs(struct device_type *dev_type)
46263 +void power_supply_init_attrs(void)
46264 {
46265 int i;
46266
46267 - dev_type->groups = power_supply_attr_groups;
46268 -
46269 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
46270 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
46271 }
46272 diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
46273 index a01b8b3..37c2afe 100644
46274 --- a/drivers/regulator/core.c
46275 +++ b/drivers/regulator/core.c
46276 @@ -3307,7 +3307,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
46277 {
46278 const struct regulation_constraints *constraints = NULL;
46279 const struct regulator_init_data *init_data;
46280 - static atomic_t regulator_no = ATOMIC_INIT(0);
46281 + static atomic_unchecked_t regulator_no = ATOMIC_INIT(0);
46282 struct regulator_dev *rdev;
46283 struct device *dev;
46284 int ret, i;
46285 @@ -3377,7 +3377,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
46286 rdev->dev.of_node = config->of_node;
46287 rdev->dev.parent = dev;
46288 dev_set_name(&rdev->dev, "regulator.%d",
46289 - atomic_inc_return(&regulator_no) - 1);
46290 + atomic_inc_return_unchecked(&regulator_no) - 1);
46291 ret = device_register(&rdev->dev);
46292 if (ret != 0) {
46293 put_device(&rdev->dev);
46294 diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
46295 index 144bcac..d20e7db 100644
46296 --- a/drivers/regulator/max8660.c
46297 +++ b/drivers/regulator/max8660.c
46298 @@ -420,8 +420,10 @@ static int max8660_probe(struct i2c_client *client,
46299 max8660->shadow_regs[MAX8660_OVER1] = 5;
46300 } else {
46301 /* Otherwise devices can be toggled via software */
46302 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
46303 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
46304 + pax_open_kernel();
46305 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
46306 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
46307 + pax_close_kernel();
46308 }
46309
46310 /*
46311 diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
46312 index 5b77ab7..a62f061 100644
46313 --- a/drivers/regulator/max8973-regulator.c
46314 +++ b/drivers/regulator/max8973-regulator.c
46315 @@ -406,9 +406,11 @@ static int max8973_probe(struct i2c_client *client,
46316 if (!pdata || !pdata->enable_ext_control) {
46317 max->desc.enable_reg = MAX8973_VOUT;
46318 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
46319 - max->ops.enable = regulator_enable_regmap;
46320 - max->ops.disable = regulator_disable_regmap;
46321 - max->ops.is_enabled = regulator_is_enabled_regmap;
46322 + pax_open_kernel();
46323 + *(void **)&max->ops.enable = regulator_enable_regmap;
46324 + *(void **)&max->ops.disable = regulator_disable_regmap;
46325 + *(void **)&max->ops.is_enabled = regulator_is_enabled_regmap;
46326 + pax_close_kernel();
46327 }
46328
46329 if (pdata) {
46330 diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
46331 index 1037e07..e64dea1 100644
46332 --- a/drivers/regulator/mc13892-regulator.c
46333 +++ b/drivers/regulator/mc13892-regulator.c
46334 @@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
46335 }
46336 mc13xxx_unlock(mc13892);
46337
46338 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
46339 + pax_open_kernel();
46340 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
46341 = mc13892_vcam_set_mode;
46342 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
46343 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
46344 = mc13892_vcam_get_mode;
46345 + pax_close_kernel();
46346
46347 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
46348 ARRAY_SIZE(mc13892_regulators));
46349 diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
46350 index 24e733c..bfbaa3e 100644
46351 --- a/drivers/rtc/rtc-cmos.c
46352 +++ b/drivers/rtc/rtc-cmos.c
46353 @@ -731,7 +731,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
46354 hpet_rtc_timer_init();
46355
46356 /* export at least the first block of NVRAM */
46357 - nvram.size = address_space - NVRAM_OFFSET;
46358 + pax_open_kernel();
46359 + *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
46360 + pax_close_kernel();
46361 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
46362 if (retval < 0) {
46363 dev_dbg(dev, "can't create nvram file? %d\n", retval);
46364 diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
46365 index d049393..bb20be0 100644
46366 --- a/drivers/rtc/rtc-dev.c
46367 +++ b/drivers/rtc/rtc-dev.c
46368 @@ -16,6 +16,7 @@
46369 #include <linux/module.h>
46370 #include <linux/rtc.h>
46371 #include <linux/sched.h>
46372 +#include <linux/grsecurity.h>
46373 #include "rtc-core.h"
46374
46375 static dev_t rtc_devt;
46376 @@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
46377 if (copy_from_user(&tm, uarg, sizeof(tm)))
46378 return -EFAULT;
46379
46380 + gr_log_timechange();
46381 +
46382 return rtc_set_time(rtc, &tm);
46383
46384 case RTC_PIE_ON:
46385 diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
46386 index ca18fd1..055e42d 100644
46387 --- a/drivers/rtc/rtc-ds1307.c
46388 +++ b/drivers/rtc/rtc-ds1307.c
46389 @@ -107,7 +107,7 @@ struct ds1307 {
46390 u8 offset; /* register's offset */
46391 u8 regs[11];
46392 u16 nvram_offset;
46393 - struct bin_attribute *nvram;
46394 + bin_attribute_no_const *nvram;
46395 enum ds_type type;
46396 unsigned long flags;
46397 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
46398 diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
46399 index fcb0329..d77b7f2 100644
46400 --- a/drivers/rtc/rtc-m48t59.c
46401 +++ b/drivers/rtc/rtc-m48t59.c
46402 @@ -483,7 +483,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
46403 if (IS_ERR(m48t59->rtc))
46404 return PTR_ERR(m48t59->rtc);
46405
46406 - m48t59_nvram_attr.size = pdata->offset;
46407 + pax_open_kernel();
46408 + *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
46409 + pax_close_kernel();
46410
46411 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
46412 if (ret)
46413 diff --git a/drivers/scsi/aic7xxx/aic79xx_pci.c b/drivers/scsi/aic7xxx/aic79xx_pci.c
46414 index 14b5f8d..cc9bd26 100644
46415 --- a/drivers/scsi/aic7xxx/aic79xx_pci.c
46416 +++ b/drivers/scsi/aic7xxx/aic79xx_pci.c
46417 @@ -827,7 +827,7 @@ ahd_pci_intr(struct ahd_softc *ahd)
46418 for (bit = 0; bit < 8; bit++) {
46419
46420 if ((pci_status[i] & (0x1 << bit)) != 0) {
46421 - static const char *s;
46422 + const char *s;
46423
46424 s = pci_status_strings[bit];
46425 if (i == 7/*TARG*/ && bit == 3)
46426 @@ -887,23 +887,15 @@ ahd_pci_split_intr(struct ahd_softc *ahd, u_int intstat)
46427
46428 for (bit = 0; bit < 8; bit++) {
46429
46430 - if ((split_status[i] & (0x1 << bit)) != 0) {
46431 - static const char *s;
46432 -
46433 - s = split_status_strings[bit];
46434 - printk(s, ahd_name(ahd),
46435 + if ((split_status[i] & (0x1 << bit)) != 0)
46436 + printk(split_status_strings[bit], ahd_name(ahd),
46437 split_status_source[i]);
46438 - }
46439
46440 if (i > 1)
46441 continue;
46442
46443 - if ((sg_split_status[i] & (0x1 << bit)) != 0) {
46444 - static const char *s;
46445 -
46446 - s = split_status_strings[bit];
46447 - printk(s, ahd_name(ahd), "SG");
46448 - }
46449 + if ((sg_split_status[i] & (0x1 << bit)) != 0)
46450 + printk(split_status_strings[bit], ahd_name(ahd), "SG");
46451 }
46452 }
46453 /*
46454 diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
46455 index e693af6..2e525b6 100644
46456 --- a/drivers/scsi/bfa/bfa_fcpim.h
46457 +++ b/drivers/scsi/bfa/bfa_fcpim.h
46458 @@ -36,7 +36,7 @@ struct bfa_iotag_s {
46459
46460 struct bfa_itn_s {
46461 bfa_isr_func_t isr;
46462 -};
46463 +} __no_const;
46464
46465 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
46466 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
46467 diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
46468 index 90814fe..4384138 100644
46469 --- a/drivers/scsi/bfa/bfa_ioc.h
46470 +++ b/drivers/scsi/bfa/bfa_ioc.h
46471 @@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
46472 bfa_ioc_disable_cbfn_t disable_cbfn;
46473 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
46474 bfa_ioc_reset_cbfn_t reset_cbfn;
46475 -};
46476 +} __no_const;
46477
46478 /*
46479 * IOC event notification mechanism.
46480 @@ -352,7 +352,7 @@ struct bfa_ioc_hwif_s {
46481 void (*ioc_set_alt_fwstate) (struct bfa_ioc_s *ioc,
46482 enum bfi_ioc_state fwstate);
46483 enum bfi_ioc_state (*ioc_get_alt_fwstate) (struct bfa_ioc_s *ioc);
46484 -};
46485 +} __no_const;
46486
46487 /*
46488 * Queue element to wait for room in request queue. FIFO order is
46489 diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
46490 index c9382d6..6619864 100644
46491 --- a/drivers/scsi/fcoe/fcoe_sysfs.c
46492 +++ b/drivers/scsi/fcoe/fcoe_sysfs.c
46493 @@ -33,8 +33,8 @@
46494 */
46495 #include "libfcoe.h"
46496
46497 -static atomic_t ctlr_num;
46498 -static atomic_t fcf_num;
46499 +static atomic_unchecked_t ctlr_num;
46500 +static atomic_unchecked_t fcf_num;
46501
46502 /*
46503 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
46504 @@ -681,7 +681,7 @@ struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
46505 if (!ctlr)
46506 goto out;
46507
46508 - ctlr->id = atomic_inc_return(&ctlr_num) - 1;
46509 + ctlr->id = atomic_inc_return_unchecked(&ctlr_num) - 1;
46510 ctlr->f = f;
46511 ctlr->mode = FIP_CONN_TYPE_FABRIC;
46512 INIT_LIST_HEAD(&ctlr->fcfs);
46513 @@ -898,7 +898,7 @@ struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
46514 fcf->dev.parent = &ctlr->dev;
46515 fcf->dev.bus = &fcoe_bus_type;
46516 fcf->dev.type = &fcoe_fcf_device_type;
46517 - fcf->id = atomic_inc_return(&fcf_num) - 1;
46518 + fcf->id = atomic_inc_return_unchecked(&fcf_num) - 1;
46519 fcf->state = FCOE_FCF_STATE_UNKNOWN;
46520
46521 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
46522 @@ -934,8 +934,8 @@ int __init fcoe_sysfs_setup(void)
46523 {
46524 int error;
46525
46526 - atomic_set(&ctlr_num, 0);
46527 - atomic_set(&fcf_num, 0);
46528 + atomic_set_unchecked(&ctlr_num, 0);
46529 + atomic_set_unchecked(&fcf_num, 0);
46530
46531 error = bus_register(&fcoe_bus_type);
46532 if (error)
46533 diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
46534 index 3cafe0d..f1e87f8 100644
46535 --- a/drivers/scsi/hosts.c
46536 +++ b/drivers/scsi/hosts.c
46537 @@ -42,7 +42,7 @@
46538 #include "scsi_logging.h"
46539
46540
46541 -static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
46542 +static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
46543
46544
46545 static void scsi_host_cls_release(struct device *dev)
46546 @@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
46547 * subtract one because we increment first then return, but we need to
46548 * know what the next host number was before increment
46549 */
46550 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
46551 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
46552 shost->dma_channel = 0xff;
46553
46554 /* These three are default values which can be overridden */
46555 diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
46556 index 0eb0940..3ca9b79 100644
46557 --- a/drivers/scsi/hpsa.c
46558 +++ b/drivers/scsi/hpsa.c
46559 @@ -579,7 +579,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
46560 unsigned long flags;
46561
46562 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
46563 - return h->access.command_completed(h, q);
46564 + return h->access->command_completed(h, q);
46565
46566 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
46567 a = rq->head[rq->current_entry];
46568 @@ -3445,7 +3445,7 @@ static void start_io(struct ctlr_info *h)
46569 while (!list_empty(&h->reqQ)) {
46570 c = list_entry(h->reqQ.next, struct CommandList, list);
46571 /* can't do anything if fifo is full */
46572 - if ((h->access.fifo_full(h))) {
46573 + if ((h->access->fifo_full(h))) {
46574 dev_warn(&h->pdev->dev, "fifo full\n");
46575 break;
46576 }
46577 @@ -3467,7 +3467,7 @@ static void start_io(struct ctlr_info *h)
46578
46579 /* Tell the controller execute command */
46580 spin_unlock_irqrestore(&h->lock, flags);
46581 - h->access.submit_command(h, c);
46582 + h->access->submit_command(h, c);
46583 spin_lock_irqsave(&h->lock, flags);
46584 }
46585 spin_unlock_irqrestore(&h->lock, flags);
46586 @@ -3475,17 +3475,17 @@ static void start_io(struct ctlr_info *h)
46587
46588 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
46589 {
46590 - return h->access.command_completed(h, q);
46591 + return h->access->command_completed(h, q);
46592 }
46593
46594 static inline bool interrupt_pending(struct ctlr_info *h)
46595 {
46596 - return h->access.intr_pending(h);
46597 + return h->access->intr_pending(h);
46598 }
46599
46600 static inline long interrupt_not_for_us(struct ctlr_info *h)
46601 {
46602 - return (h->access.intr_pending(h) == 0) ||
46603 + return (h->access->intr_pending(h) == 0) ||
46604 (h->interrupts_enabled == 0);
46605 }
46606
46607 @@ -4387,7 +4387,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
46608 if (prod_index < 0)
46609 return -ENODEV;
46610 h->product_name = products[prod_index].product_name;
46611 - h->access = *(products[prod_index].access);
46612 + h->access = products[prod_index].access;
46613
46614 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
46615 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
46616 @@ -4669,7 +4669,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
46617
46618 assert_spin_locked(&lockup_detector_lock);
46619 remove_ctlr_from_lockup_detector_list(h);
46620 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
46621 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
46622 spin_lock_irqsave(&h->lock, flags);
46623 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
46624 spin_unlock_irqrestore(&h->lock, flags);
46625 @@ -4846,7 +4846,7 @@ reinit_after_soft_reset:
46626 }
46627
46628 /* make sure the board interrupts are off */
46629 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
46630 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
46631
46632 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
46633 goto clean2;
46634 @@ -4880,7 +4880,7 @@ reinit_after_soft_reset:
46635 * fake ones to scoop up any residual completions.
46636 */
46637 spin_lock_irqsave(&h->lock, flags);
46638 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
46639 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
46640 spin_unlock_irqrestore(&h->lock, flags);
46641 free_irqs(h);
46642 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
46643 @@ -4899,9 +4899,9 @@ reinit_after_soft_reset:
46644 dev_info(&h->pdev->dev, "Board READY.\n");
46645 dev_info(&h->pdev->dev,
46646 "Waiting for stale completions to drain.\n");
46647 - h->access.set_intr_mask(h, HPSA_INTR_ON);
46648 + h->access->set_intr_mask(h, HPSA_INTR_ON);
46649 msleep(10000);
46650 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
46651 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
46652
46653 rc = controller_reset_failed(h->cfgtable);
46654 if (rc)
46655 @@ -4922,7 +4922,7 @@ reinit_after_soft_reset:
46656 }
46657
46658 /* Turn the interrupts on so we can service requests */
46659 - h->access.set_intr_mask(h, HPSA_INTR_ON);
46660 + h->access->set_intr_mask(h, HPSA_INTR_ON);
46661
46662 hpsa_hba_inquiry(h);
46663 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
46664 @@ -4977,7 +4977,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
46665 * To write all data in the battery backed cache to disks
46666 */
46667 hpsa_flush_cache(h);
46668 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
46669 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
46670 hpsa_free_irqs_and_disable_msix(h);
46671 }
46672
46673 @@ -5145,7 +5145,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
46674 return;
46675 }
46676 /* Change the access methods to the performant access methods */
46677 - h->access = SA5_performant_access;
46678 + h->access = &SA5_performant_access;
46679 h->transMethod = CFGTBL_Trans_Performant;
46680 }
46681
46682 diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
46683 index bc85e72..ae04a39 100644
46684 --- a/drivers/scsi/hpsa.h
46685 +++ b/drivers/scsi/hpsa.h
46686 @@ -79,7 +79,7 @@ struct ctlr_info {
46687 unsigned int msix_vector;
46688 unsigned int msi_vector;
46689 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
46690 - struct access_method access;
46691 + struct access_method *access;
46692
46693 /* queue and queue Info */
46694 struct list_head reqQ;
46695 diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
46696 index 5879929..32b241d 100644
46697 --- a/drivers/scsi/libfc/fc_exch.c
46698 +++ b/drivers/scsi/libfc/fc_exch.c
46699 @@ -100,12 +100,12 @@ struct fc_exch_mgr {
46700 u16 pool_max_index;
46701
46702 struct {
46703 - atomic_t no_free_exch;
46704 - atomic_t no_free_exch_xid;
46705 - atomic_t xid_not_found;
46706 - atomic_t xid_busy;
46707 - atomic_t seq_not_found;
46708 - atomic_t non_bls_resp;
46709 + atomic_unchecked_t no_free_exch;
46710 + atomic_unchecked_t no_free_exch_xid;
46711 + atomic_unchecked_t xid_not_found;
46712 + atomic_unchecked_t xid_busy;
46713 + atomic_unchecked_t seq_not_found;
46714 + atomic_unchecked_t non_bls_resp;
46715 } stats;
46716 };
46717
46718 @@ -736,7 +736,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
46719 /* allocate memory for exchange */
46720 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
46721 if (!ep) {
46722 - atomic_inc(&mp->stats.no_free_exch);
46723 + atomic_inc_unchecked(&mp->stats.no_free_exch);
46724 goto out;
46725 }
46726 memset(ep, 0, sizeof(*ep));
46727 @@ -797,7 +797,7 @@ out:
46728 return ep;
46729 err:
46730 spin_unlock_bh(&pool->lock);
46731 - atomic_inc(&mp->stats.no_free_exch_xid);
46732 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
46733 mempool_free(ep, mp->ep_pool);
46734 return NULL;
46735 }
46736 @@ -940,7 +940,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
46737 xid = ntohs(fh->fh_ox_id); /* we originated exch */
46738 ep = fc_exch_find(mp, xid);
46739 if (!ep) {
46740 - atomic_inc(&mp->stats.xid_not_found);
46741 + atomic_inc_unchecked(&mp->stats.xid_not_found);
46742 reject = FC_RJT_OX_ID;
46743 goto out;
46744 }
46745 @@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
46746 ep = fc_exch_find(mp, xid);
46747 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
46748 if (ep) {
46749 - atomic_inc(&mp->stats.xid_busy);
46750 + atomic_inc_unchecked(&mp->stats.xid_busy);
46751 reject = FC_RJT_RX_ID;
46752 goto rel;
46753 }
46754 @@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
46755 }
46756 xid = ep->xid; /* get our XID */
46757 } else if (!ep) {
46758 - atomic_inc(&mp->stats.xid_not_found);
46759 + atomic_inc_unchecked(&mp->stats.xid_not_found);
46760 reject = FC_RJT_RX_ID; /* XID not found */
46761 goto out;
46762 }
46763 @@ -998,7 +998,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
46764 } else {
46765 sp = &ep->seq;
46766 if (sp->id != fh->fh_seq_id) {
46767 - atomic_inc(&mp->stats.seq_not_found);
46768 + atomic_inc_unchecked(&mp->stats.seq_not_found);
46769 if (f_ctl & FC_FC_END_SEQ) {
46770 /*
46771 * Update sequence_id based on incoming last
46772 @@ -1448,22 +1448,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
46773
46774 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
46775 if (!ep) {
46776 - atomic_inc(&mp->stats.xid_not_found);
46777 + atomic_inc_unchecked(&mp->stats.xid_not_found);
46778 goto out;
46779 }
46780 if (ep->esb_stat & ESB_ST_COMPLETE) {
46781 - atomic_inc(&mp->stats.xid_not_found);
46782 + atomic_inc_unchecked(&mp->stats.xid_not_found);
46783 goto rel;
46784 }
46785 if (ep->rxid == FC_XID_UNKNOWN)
46786 ep->rxid = ntohs(fh->fh_rx_id);
46787 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
46788 - atomic_inc(&mp->stats.xid_not_found);
46789 + atomic_inc_unchecked(&mp->stats.xid_not_found);
46790 goto rel;
46791 }
46792 if (ep->did != ntoh24(fh->fh_s_id) &&
46793 ep->did != FC_FID_FLOGI) {
46794 - atomic_inc(&mp->stats.xid_not_found);
46795 + atomic_inc_unchecked(&mp->stats.xid_not_found);
46796 goto rel;
46797 }
46798 sof = fr_sof(fp);
46799 @@ -1472,7 +1472,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
46800 sp->ssb_stat |= SSB_ST_RESP;
46801 sp->id = fh->fh_seq_id;
46802 } else if (sp->id != fh->fh_seq_id) {
46803 - atomic_inc(&mp->stats.seq_not_found);
46804 + atomic_inc_unchecked(&mp->stats.seq_not_found);
46805 goto rel;
46806 }
46807
46808 @@ -1536,9 +1536,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
46809 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
46810
46811 if (!sp)
46812 - atomic_inc(&mp->stats.xid_not_found);
46813 + atomic_inc_unchecked(&mp->stats.xid_not_found);
46814 else
46815 - atomic_inc(&mp->stats.non_bls_resp);
46816 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
46817
46818 fc_frame_free(fp);
46819 }
46820 @@ -2185,13 +2185,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
46821
46822 list_for_each_entry(ema, &lport->ema_list, ema_list) {
46823 mp = ema->mp;
46824 - st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
46825 + st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
46826 st->fc_no_free_exch_xid +=
46827 - atomic_read(&mp->stats.no_free_exch_xid);
46828 - st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
46829 - st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
46830 - st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
46831 - st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
46832 + atomic_read_unchecked(&mp->stats.no_free_exch_xid);
46833 + st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
46834 + st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
46835 + st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
46836 + st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
46837 }
46838 }
46839 EXPORT_SYMBOL(fc_exch_update_stats);
46840 diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
46841 index d289583..b745eec 100644
46842 --- a/drivers/scsi/libsas/sas_ata.c
46843 +++ b/drivers/scsi/libsas/sas_ata.c
46844 @@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
46845 .postreset = ata_std_postreset,
46846 .error_handler = ata_std_error_handler,
46847 .post_internal_cmd = sas_ata_post_internal,
46848 - .qc_defer = ata_std_qc_defer,
46849 + .qc_defer = ata_std_qc_defer,
46850 .qc_prep = ata_noop_qc_prep,
46851 .qc_issue = sas_ata_qc_issue,
46852 .qc_fill_rtf = sas_ata_qc_fill_rtf,
46853 diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
46854 index 4e1b75c..0bbdfa9 100644
46855 --- a/drivers/scsi/lpfc/lpfc.h
46856 +++ b/drivers/scsi/lpfc/lpfc.h
46857 @@ -432,7 +432,7 @@ struct lpfc_vport {
46858 struct dentry *debug_nodelist;
46859 struct dentry *vport_debugfs_root;
46860 struct lpfc_debugfs_trc *disc_trc;
46861 - atomic_t disc_trc_cnt;
46862 + atomic_unchecked_t disc_trc_cnt;
46863 #endif
46864 uint8_t stat_data_enabled;
46865 uint8_t stat_data_blocked;
46866 @@ -865,8 +865,8 @@ struct lpfc_hba {
46867 struct timer_list fabric_block_timer;
46868 unsigned long bit_flags;
46869 #define FABRIC_COMANDS_BLOCKED 0
46870 - atomic_t num_rsrc_err;
46871 - atomic_t num_cmd_success;
46872 + atomic_unchecked_t num_rsrc_err;
46873 + atomic_unchecked_t num_cmd_success;
46874 unsigned long last_rsrc_error_time;
46875 unsigned long last_ramp_down_time;
46876 unsigned long last_ramp_up_time;
46877 @@ -902,7 +902,7 @@ struct lpfc_hba {
46878
46879 struct dentry *debug_slow_ring_trc;
46880 struct lpfc_debugfs_trc *slow_ring_trc;
46881 - atomic_t slow_ring_trc_cnt;
46882 + atomic_unchecked_t slow_ring_trc_cnt;
46883 /* iDiag debugfs sub-directory */
46884 struct dentry *idiag_root;
46885 struct dentry *idiag_pci_cfg;
46886 diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
46887 index 60084e6..0e2e700 100644
46888 --- a/drivers/scsi/lpfc/lpfc_debugfs.c
46889 +++ b/drivers/scsi/lpfc/lpfc_debugfs.c
46890 @@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
46891
46892 #include <linux/debugfs.h>
46893
46894 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
46895 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
46896 static unsigned long lpfc_debugfs_start_time = 0L;
46897
46898 /* iDiag */
46899 @@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
46900 lpfc_debugfs_enable = 0;
46901
46902 len = 0;
46903 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
46904 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
46905 (lpfc_debugfs_max_disc_trc - 1);
46906 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
46907 dtp = vport->disc_trc + i;
46908 @@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
46909 lpfc_debugfs_enable = 0;
46910
46911 len = 0;
46912 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
46913 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
46914 (lpfc_debugfs_max_slow_ring_trc - 1);
46915 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
46916 dtp = phba->slow_ring_trc + i;
46917 @@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
46918 !vport || !vport->disc_trc)
46919 return;
46920
46921 - index = atomic_inc_return(&vport->disc_trc_cnt) &
46922 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
46923 (lpfc_debugfs_max_disc_trc - 1);
46924 dtp = vport->disc_trc + index;
46925 dtp->fmt = fmt;
46926 dtp->data1 = data1;
46927 dtp->data2 = data2;
46928 dtp->data3 = data3;
46929 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
46930 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
46931 dtp->jif = jiffies;
46932 #endif
46933 return;
46934 @@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
46935 !phba || !phba->slow_ring_trc)
46936 return;
46937
46938 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
46939 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
46940 (lpfc_debugfs_max_slow_ring_trc - 1);
46941 dtp = phba->slow_ring_trc + index;
46942 dtp->fmt = fmt;
46943 dtp->data1 = data1;
46944 dtp->data2 = data2;
46945 dtp->data3 = data3;
46946 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
46947 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
46948 dtp->jif = jiffies;
46949 #endif
46950 return;
46951 @@ -4168,7 +4168,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
46952 "slow_ring buffer\n");
46953 goto debug_failed;
46954 }
46955 - atomic_set(&phba->slow_ring_trc_cnt, 0);
46956 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
46957 memset(phba->slow_ring_trc, 0,
46958 (sizeof(struct lpfc_debugfs_trc) *
46959 lpfc_debugfs_max_slow_ring_trc));
46960 @@ -4214,7 +4214,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
46961 "buffer\n");
46962 goto debug_failed;
46963 }
46964 - atomic_set(&vport->disc_trc_cnt, 0);
46965 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
46966
46967 snprintf(name, sizeof(name), "discovery_trace");
46968 vport->debug_disc_trc =
46969 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
46970 index 647f5bf..d0068b9 100644
46971 --- a/drivers/scsi/lpfc/lpfc_init.c
46972 +++ b/drivers/scsi/lpfc/lpfc_init.c
46973 @@ -10952,8 +10952,10 @@ lpfc_init(void)
46974 "misc_register returned with status %d", error);
46975
46976 if (lpfc_enable_npiv) {
46977 - lpfc_transport_functions.vport_create = lpfc_vport_create;
46978 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
46979 + pax_open_kernel();
46980 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
46981 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
46982 + pax_close_kernel();
46983 }
46984 lpfc_transport_template =
46985 fc_attach_transport(&lpfc_transport_functions);
46986 diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
46987 index c913e8c..d34a119 100644
46988 --- a/drivers/scsi/lpfc/lpfc_scsi.c
46989 +++ b/drivers/scsi/lpfc/lpfc_scsi.c
46990 @@ -353,7 +353,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
46991 uint32_t evt_posted;
46992
46993 spin_lock_irqsave(&phba->hbalock, flags);
46994 - atomic_inc(&phba->num_rsrc_err);
46995 + atomic_inc_unchecked(&phba->num_rsrc_err);
46996 phba->last_rsrc_error_time = jiffies;
46997
46998 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
46999 @@ -394,7 +394,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
47000 unsigned long flags;
47001 struct lpfc_hba *phba = vport->phba;
47002 uint32_t evt_posted;
47003 - atomic_inc(&phba->num_cmd_success);
47004 + atomic_inc_unchecked(&phba->num_cmd_success);
47005
47006 if (vport->cfg_lun_queue_depth <= queue_depth)
47007 return;
47008 @@ -438,8 +438,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
47009 unsigned long num_rsrc_err, num_cmd_success;
47010 int i;
47011
47012 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
47013 - num_cmd_success = atomic_read(&phba->num_cmd_success);
47014 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
47015 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
47016
47017 /*
47018 * The error and success command counters are global per
47019 @@ -467,8 +467,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
47020 }
47021 }
47022 lpfc_destroy_vport_work_array(phba, vports);
47023 - atomic_set(&phba->num_rsrc_err, 0);
47024 - atomic_set(&phba->num_cmd_success, 0);
47025 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
47026 + atomic_set_unchecked(&phba->num_cmd_success, 0);
47027 }
47028
47029 /**
47030 @@ -502,8 +502,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
47031 }
47032 }
47033 lpfc_destroy_vport_work_array(phba, vports);
47034 - atomic_set(&phba->num_rsrc_err, 0);
47035 - atomic_set(&phba->num_cmd_success, 0);
47036 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
47037 + atomic_set_unchecked(&phba->num_cmd_success, 0);
47038 }
47039
47040 /**
47041 diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
47042 index 7f0af4f..193ac3e 100644
47043 --- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
47044 +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
47045 @@ -1557,7 +1557,7 @@ _scsih_get_resync(struct device *dev)
47046 {
47047 struct scsi_device *sdev = to_scsi_device(dev);
47048 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
47049 - static struct _raid_device *raid_device;
47050 + struct _raid_device *raid_device;
47051 unsigned long flags;
47052 Mpi2RaidVolPage0_t vol_pg0;
47053 Mpi2ConfigReply_t mpi_reply;
47054 @@ -1609,7 +1609,7 @@ _scsih_get_state(struct device *dev)
47055 {
47056 struct scsi_device *sdev = to_scsi_device(dev);
47057 struct MPT2SAS_ADAPTER *ioc = shost_priv(sdev->host);
47058 - static struct _raid_device *raid_device;
47059 + struct _raid_device *raid_device;
47060 unsigned long flags;
47061 Mpi2RaidVolPage0_t vol_pg0;
47062 Mpi2ConfigReply_t mpi_reply;
47063 @@ -6637,7 +6637,7 @@ _scsih_sas_ir_operation_status_event(struct MPT2SAS_ADAPTER *ioc,
47064 struct fw_event_work *fw_event)
47065 {
47066 Mpi2EventDataIrOperationStatus_t *event_data = fw_event->event_data;
47067 - static struct _raid_device *raid_device;
47068 + struct _raid_device *raid_device;
47069 unsigned long flags;
47070 u16 handle;
47071
47072 @@ -7108,7 +7108,7 @@ _scsih_scan_for_devices_after_reset(struct MPT2SAS_ADAPTER *ioc)
47073 u64 sas_address;
47074 struct _sas_device *sas_device;
47075 struct _sas_node *expander_device;
47076 - static struct _raid_device *raid_device;
47077 + struct _raid_device *raid_device;
47078 u8 retry_count;
47079 unsigned long flags;
47080
47081 diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
47082 index a38f71b..f3bc572 100644
47083 --- a/drivers/scsi/pmcraid.c
47084 +++ b/drivers/scsi/pmcraid.c
47085 @@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
47086 res->scsi_dev = scsi_dev;
47087 scsi_dev->hostdata = res;
47088 res->change_detected = 0;
47089 - atomic_set(&res->read_failures, 0);
47090 - atomic_set(&res->write_failures, 0);
47091 + atomic_set_unchecked(&res->read_failures, 0);
47092 + atomic_set_unchecked(&res->write_failures, 0);
47093 rc = 0;
47094 }
47095 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
47096 @@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
47097
47098 /* If this was a SCSI read/write command keep count of errors */
47099 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
47100 - atomic_inc(&res->read_failures);
47101 + atomic_inc_unchecked(&res->read_failures);
47102 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
47103 - atomic_inc(&res->write_failures);
47104 + atomic_inc_unchecked(&res->write_failures);
47105
47106 if (!RES_IS_GSCSI(res->cfg_entry) &&
47107 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
47108 @@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
47109 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
47110 * hrrq_id assigned here in queuecommand
47111 */
47112 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
47113 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
47114 pinstance->num_hrrq;
47115 cmd->cmd_done = pmcraid_io_done;
47116
47117 @@ -3846,7 +3846,7 @@ static long pmcraid_ioctl_passthrough(
47118 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
47119 * hrrq_id assigned here in queuecommand
47120 */
47121 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
47122 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
47123 pinstance->num_hrrq;
47124
47125 if (request_size) {
47126 @@ -4484,7 +4484,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
47127
47128 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
47129 /* add resources only after host is added into system */
47130 - if (!atomic_read(&pinstance->expose_resources))
47131 + if (!atomic_read_unchecked(&pinstance->expose_resources))
47132 return;
47133
47134 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
47135 @@ -5311,8 +5311,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
47136 init_waitqueue_head(&pinstance->reset_wait_q);
47137
47138 atomic_set(&pinstance->outstanding_cmds, 0);
47139 - atomic_set(&pinstance->last_message_id, 0);
47140 - atomic_set(&pinstance->expose_resources, 0);
47141 + atomic_set_unchecked(&pinstance->last_message_id, 0);
47142 + atomic_set_unchecked(&pinstance->expose_resources, 0);
47143
47144 INIT_LIST_HEAD(&pinstance->free_res_q);
47145 INIT_LIST_HEAD(&pinstance->used_res_q);
47146 @@ -6025,7 +6025,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
47147 /* Schedule worker thread to handle CCN and take care of adding and
47148 * removing devices to OS
47149 */
47150 - atomic_set(&pinstance->expose_resources, 1);
47151 + atomic_set_unchecked(&pinstance->expose_resources, 1);
47152 schedule_work(&pinstance->worker_q);
47153 return rc;
47154
47155 diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
47156 index e1d150f..6c6df44 100644
47157 --- a/drivers/scsi/pmcraid.h
47158 +++ b/drivers/scsi/pmcraid.h
47159 @@ -748,7 +748,7 @@ struct pmcraid_instance {
47160 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
47161
47162 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
47163 - atomic_t last_message_id;
47164 + atomic_unchecked_t last_message_id;
47165
47166 /* configuration table */
47167 struct pmcraid_config_table *cfg_table;
47168 @@ -777,7 +777,7 @@ struct pmcraid_instance {
47169 atomic_t outstanding_cmds;
47170
47171 /* should add/delete resources to mid-layer now ?*/
47172 - atomic_t expose_resources;
47173 + atomic_unchecked_t expose_resources;
47174
47175
47176
47177 @@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
47178 struct pmcraid_config_table_entry_ext cfg_entry_ext;
47179 };
47180 struct scsi_device *scsi_dev; /* Link scsi_device structure */
47181 - atomic_t read_failures; /* count of failed READ commands */
47182 - atomic_t write_failures; /* count of failed WRITE commands */
47183 + atomic_unchecked_t read_failures; /* count of failed READ commands */
47184 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
47185
47186 /* To indicate add/delete/modify during CCN */
47187 u8 change_detected;
47188 diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
47189 index 5f174b8..98d32b0 100644
47190 --- a/drivers/scsi/qla2xxx/qla_attr.c
47191 +++ b/drivers/scsi/qla2xxx/qla_attr.c
47192 @@ -2040,7 +2040,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
47193 return 0;
47194 }
47195
47196 -struct fc_function_template qla2xxx_transport_functions = {
47197 +fc_function_template_no_const qla2xxx_transport_functions = {
47198
47199 .show_host_node_name = 1,
47200 .show_host_port_name = 1,
47201 @@ -2088,7 +2088,7 @@ struct fc_function_template qla2xxx_transport_functions = {
47202 .bsg_timeout = qla24xx_bsg_timeout,
47203 };
47204
47205 -struct fc_function_template qla2xxx_transport_vport_functions = {
47206 +fc_function_template_no_const qla2xxx_transport_vport_functions = {
47207
47208 .show_host_node_name = 1,
47209 .show_host_port_name = 1,
47210 diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
47211 index 4446bf5..9a3574d 100644
47212 --- a/drivers/scsi/qla2xxx/qla_gbl.h
47213 +++ b/drivers/scsi/qla2xxx/qla_gbl.h
47214 @@ -538,8 +538,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
47215 struct device_attribute;
47216 extern struct device_attribute *qla2x00_host_attrs[];
47217 struct fc_function_template;
47218 -extern struct fc_function_template qla2xxx_transport_functions;
47219 -extern struct fc_function_template qla2xxx_transport_vport_functions;
47220 +extern fc_function_template_no_const qla2xxx_transport_functions;
47221 +extern fc_function_template_no_const qla2xxx_transport_vport_functions;
47222 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
47223 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
47224 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
47225 diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
47226 index 9f01bbb..5e1dcee 100644
47227 --- a/drivers/scsi/qla2xxx/qla_os.c
47228 +++ b/drivers/scsi/qla2xxx/qla_os.c
47229 @@ -1572,8 +1572,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
47230 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
47231 /* Ok, a 64bit DMA mask is applicable. */
47232 ha->flags.enable_64bit_addressing = 1;
47233 - ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
47234 - ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
47235 + pax_open_kernel();
47236 + *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
47237 + *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
47238 + pax_close_kernel();
47239 return;
47240 }
47241 }
47242 diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
47243 index 41327d4..feb03d479 100644
47244 --- a/drivers/scsi/qla4xxx/ql4_def.h
47245 +++ b/drivers/scsi/qla4xxx/ql4_def.h
47246 @@ -296,7 +296,7 @@ struct ddb_entry {
47247 * (4000 only) */
47248 atomic_t relogin_timer; /* Max Time to wait for
47249 * relogin to complete */
47250 - atomic_t relogin_retry_count; /* Num of times relogin has been
47251 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
47252 * retried */
47253 uint32_t default_time2wait; /* Default Min time between
47254 * relogins (+aens) */
47255 diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
47256 index f8a0a26..ec03cee 100644
47257 --- a/drivers/scsi/qla4xxx/ql4_os.c
47258 +++ b/drivers/scsi/qla4xxx/ql4_os.c
47259 @@ -3066,12 +3066,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
47260 */
47261 if (!iscsi_is_session_online(cls_sess)) {
47262 /* Reset retry relogin timer */
47263 - atomic_inc(&ddb_entry->relogin_retry_count);
47264 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
47265 DEBUG2(ql4_printk(KERN_INFO, ha,
47266 "%s: index[%d] relogin timed out-retrying"
47267 " relogin (%d), retry (%d)\n", __func__,
47268 ddb_entry->fw_ddb_index,
47269 - atomic_read(&ddb_entry->relogin_retry_count),
47270 + atomic_read_unchecked(&ddb_entry->relogin_retry_count),
47271 ddb_entry->default_time2wait + 4));
47272 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
47273 atomic_set(&ddb_entry->retry_relogin_timer,
47274 @@ -5209,7 +5209,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
47275
47276 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
47277 atomic_set(&ddb_entry->relogin_timer, 0);
47278 - atomic_set(&ddb_entry->relogin_retry_count, 0);
47279 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
47280 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
47281 ddb_entry->default_relogin_timeout =
47282 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
47283 diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
47284 index eaa808e..95f8841 100644
47285 --- a/drivers/scsi/scsi.c
47286 +++ b/drivers/scsi/scsi.c
47287 @@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
47288 unsigned long timeout;
47289 int rtn = 0;
47290
47291 - atomic_inc(&cmd->device->iorequest_cnt);
47292 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
47293
47294 /* check if the device is still usable */
47295 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
47296 diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
47297 index d1549b7..2f60767 100644
47298 --- a/drivers/scsi/scsi_lib.c
47299 +++ b/drivers/scsi/scsi_lib.c
47300 @@ -1474,7 +1474,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
47301 shost = sdev->host;
47302 scsi_init_cmd_errh(cmd);
47303 cmd->result = DID_NO_CONNECT << 16;
47304 - atomic_inc(&cmd->device->iorequest_cnt);
47305 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
47306
47307 /*
47308 * SCSI request completion path will do scsi_device_unbusy(),
47309 @@ -1500,9 +1500,9 @@ static void scsi_softirq_done(struct request *rq)
47310
47311 INIT_LIST_HEAD(&cmd->eh_entry);
47312
47313 - atomic_inc(&cmd->device->iodone_cnt);
47314 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
47315 if (cmd->result)
47316 - atomic_inc(&cmd->device->ioerr_cnt);
47317 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
47318
47319 disposition = scsi_decide_disposition(cmd);
47320 if (disposition != SUCCESS &&
47321 diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
47322 index 40c6394..62356c2 100644
47323 --- a/drivers/scsi/scsi_sysfs.c
47324 +++ b/drivers/scsi/scsi_sysfs.c
47325 @@ -687,7 +687,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
47326 char *buf) \
47327 { \
47328 struct scsi_device *sdev = to_scsi_device(dev); \
47329 - unsigned long long count = atomic_read(&sdev->field); \
47330 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
47331 return snprintf(buf, 20, "0x%llx\n", count); \
47332 } \
47333 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
47334 diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
47335 index 84a1fdf..693b0d6 100644
47336 --- a/drivers/scsi/scsi_tgt_lib.c
47337 +++ b/drivers/scsi/scsi_tgt_lib.c
47338 @@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
47339 int err;
47340
47341 dprintk("%lx %u\n", uaddr, len);
47342 - err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
47343 + err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
47344 if (err) {
47345 /*
47346 * TODO: need to fixup sg_tablesize, max_segment_size,
47347 diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
47348 index 4628fd5..a94a1c2 100644
47349 --- a/drivers/scsi/scsi_transport_fc.c
47350 +++ b/drivers/scsi/scsi_transport_fc.c
47351 @@ -497,7 +497,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
47352 * Netlink Infrastructure
47353 */
47354
47355 -static atomic_t fc_event_seq;
47356 +static atomic_unchecked_t fc_event_seq;
47357
47358 /**
47359 * fc_get_event_number - Obtain the next sequential FC event number
47360 @@ -510,7 +510,7 @@ static atomic_t fc_event_seq;
47361 u32
47362 fc_get_event_number(void)
47363 {
47364 - return atomic_add_return(1, &fc_event_seq);
47365 + return atomic_add_return_unchecked(1, &fc_event_seq);
47366 }
47367 EXPORT_SYMBOL(fc_get_event_number);
47368
47369 @@ -654,7 +654,7 @@ static __init int fc_transport_init(void)
47370 {
47371 int error;
47372
47373 - atomic_set(&fc_event_seq, 0);
47374 + atomic_set_unchecked(&fc_event_seq, 0);
47375
47376 error = transport_class_register(&fc_host_class);
47377 if (error)
47378 @@ -844,7 +844,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
47379 char *cp;
47380
47381 *val = simple_strtoul(buf, &cp, 0);
47382 - if ((*cp && (*cp != '\n')) || (*val < 0))
47383 + if (*cp && (*cp != '\n'))
47384 return -EINVAL;
47385 /*
47386 * Check for overflow; dev_loss_tmo is u32
47387 diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
47388 index e4a989f..293090c 100644
47389 --- a/drivers/scsi/scsi_transport_iscsi.c
47390 +++ b/drivers/scsi/scsi_transport_iscsi.c
47391 @@ -79,7 +79,7 @@ struct iscsi_internal {
47392 struct transport_container session_cont;
47393 };
47394
47395 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
47396 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
47397 static struct workqueue_struct *iscsi_eh_timer_workq;
47398
47399 static DEFINE_IDA(iscsi_sess_ida);
47400 @@ -1737,7 +1737,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
47401 int err;
47402
47403 ihost = shost->shost_data;
47404 - session->sid = atomic_add_return(1, &iscsi_session_nr);
47405 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
47406
47407 if (target_id == ISCSI_MAX_TARGET) {
47408 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
47409 @@ -4077,7 +4077,7 @@ static __init int iscsi_transport_init(void)
47410 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
47411 ISCSI_TRANSPORT_VERSION);
47412
47413 - atomic_set(&iscsi_session_nr, 0);
47414 + atomic_set_unchecked(&iscsi_session_nr, 0);
47415
47416 err = class_register(&iscsi_transport_class);
47417 if (err)
47418 diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
47419 index f379c7f..e8fc69c 100644
47420 --- a/drivers/scsi/scsi_transport_srp.c
47421 +++ b/drivers/scsi/scsi_transport_srp.c
47422 @@ -33,7 +33,7 @@
47423 #include "scsi_transport_srp_internal.h"
47424
47425 struct srp_host_attrs {
47426 - atomic_t next_port_id;
47427 + atomic_unchecked_t next_port_id;
47428 };
47429 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
47430
47431 @@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
47432 struct Scsi_Host *shost = dev_to_shost(dev);
47433 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
47434
47435 - atomic_set(&srp_host->next_port_id, 0);
47436 + atomic_set_unchecked(&srp_host->next_port_id, 0);
47437 return 0;
47438 }
47439
47440 @@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
47441 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
47442 rport->roles = ids->roles;
47443
47444 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
47445 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
47446 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
47447
47448 transport_setup_device(&rport->dev);
47449 diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
47450 index 2634d69..fcf7a81 100644
47451 --- a/drivers/scsi/sd.c
47452 +++ b/drivers/scsi/sd.c
47453 @@ -2940,7 +2940,7 @@ static int sd_probe(struct device *dev)
47454 sdkp->disk = gd;
47455 sdkp->index = index;
47456 atomic_set(&sdkp->openers, 0);
47457 - atomic_set(&sdkp->device->ioerr_cnt, 0);
47458 + atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
47459
47460 if (!sdp->request_queue->rq_timeout) {
47461 if (sdp->type != TYPE_MOD)
47462 diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
47463 index df5e961..df6b97f 100644
47464 --- a/drivers/scsi/sg.c
47465 +++ b/drivers/scsi/sg.c
47466 @@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
47467 sdp->disk->disk_name,
47468 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
47469 NULL,
47470 - (char *)arg);
47471 + (char __user *)arg);
47472 case BLKTRACESTART:
47473 return blk_trace_startstop(sdp->device->request_queue, 1);
47474 case BLKTRACESTOP:
47475 diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
47476 index 9e039c6..ae9e800 100644
47477 --- a/drivers/spi/spi.c
47478 +++ b/drivers/spi/spi.c
47479 @@ -1762,7 +1762,7 @@ int spi_bus_unlock(struct spi_master *master)
47480 EXPORT_SYMBOL_GPL(spi_bus_unlock);
47481
47482 /* portable code must never pass more than 32 bytes */
47483 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
47484 +#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
47485
47486 static u8 *buf;
47487
47488 diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
47489 index 2c61783..4d49e4e 100644
47490 --- a/drivers/staging/android/timed_output.c
47491 +++ b/drivers/staging/android/timed_output.c
47492 @@ -25,7 +25,7 @@
47493 #include "timed_output.h"
47494
47495 static struct class *timed_output_class;
47496 -static atomic_t device_count;
47497 +static atomic_unchecked_t device_count;
47498
47499 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
47500 char *buf)
47501 @@ -63,7 +63,7 @@ static int create_timed_output_class(void)
47502 timed_output_class = class_create(THIS_MODULE, "timed_output");
47503 if (IS_ERR(timed_output_class))
47504 return PTR_ERR(timed_output_class);
47505 - atomic_set(&device_count, 0);
47506 + atomic_set_unchecked(&device_count, 0);
47507 timed_output_class->dev_groups = timed_output_groups;
47508 }
47509
47510 @@ -81,7 +81,7 @@ int timed_output_dev_register(struct timed_output_dev *tdev)
47511 if (ret < 0)
47512 return ret;
47513
47514 - tdev->index = atomic_inc_return(&device_count);
47515 + tdev->index = atomic_inc_return_unchecked(&device_count);
47516 tdev->dev = device_create(timed_output_class, NULL,
47517 MKDEV(0, tdev->index), NULL, "%s", tdev->name);
47518 if (IS_ERR(tdev->dev))
47519 diff --git a/drivers/staging/gdm724x/gdm_tty.c b/drivers/staging/gdm724x/gdm_tty.c
47520 index 0247a20..cb9595c 100644
47521 --- a/drivers/staging/gdm724x/gdm_tty.c
47522 +++ b/drivers/staging/gdm724x/gdm_tty.c
47523 @@ -45,7 +45,7 @@
47524 #define gdm_tty_send_control(n, r, v, d, l) (\
47525 n->tty_dev->send_control(n->tty_dev->priv_dev, r, v, d, l))
47526
47527 -#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && gdm->port.count)
47528 +#define GDM_TTY_READY(gdm) (gdm && gdm->tty_dev && atomic_read(&gdm->port.count))
47529
47530 static struct tty_driver *gdm_driver[TTY_MAX_COUNT];
47531 static struct gdm *gdm_table[TTY_MAX_COUNT][GDM_TTY_MINOR];
47532 diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
47533 index ef5064e..fce01db 100644
47534 --- a/drivers/staging/lustre/lnet/selftest/brw_test.c
47535 +++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
47536 @@ -478,13 +478,11 @@ brw_server_handle(struct srpc_server_rpc *rpc)
47537 return 0;
47538 }
47539
47540 -sfw_test_client_ops_t brw_test_client;
47541 -void brw_init_test_client(void)
47542 -{
47543 - brw_test_client.tso_init = brw_client_init;
47544 - brw_test_client.tso_fini = brw_client_fini;
47545 - brw_test_client.tso_prep_rpc = brw_client_prep_rpc;
47546 - brw_test_client.tso_done_rpc = brw_client_done_rpc;
47547 +sfw_test_client_ops_t brw_test_client = {
47548 + .tso_init = brw_client_init,
47549 + .tso_fini = brw_client_fini,
47550 + .tso_prep_rpc = brw_client_prep_rpc,
47551 + .tso_done_rpc = brw_client_done_rpc,
47552 };
47553
47554 srpc_service_t brw_test_service;
47555 diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
47556 index 483c785..e1a2a7b 100644
47557 --- a/drivers/staging/lustre/lnet/selftest/framework.c
47558 +++ b/drivers/staging/lustre/lnet/selftest/framework.c
47559 @@ -1635,12 +1635,10 @@ static srpc_service_t sfw_services[] =
47560
47561 extern sfw_test_client_ops_t ping_test_client;
47562 extern srpc_service_t ping_test_service;
47563 -extern void ping_init_test_client(void);
47564 extern void ping_init_test_service(void);
47565
47566 extern sfw_test_client_ops_t brw_test_client;
47567 extern srpc_service_t brw_test_service;
47568 -extern void brw_init_test_client(void);
47569 extern void brw_init_test_service(void);
47570
47571
47572 @@ -1684,12 +1682,10 @@ sfw_startup (void)
47573 INIT_LIST_HEAD(&sfw_data.fw_zombie_rpcs);
47574 INIT_LIST_HEAD(&sfw_data.fw_zombie_sessions);
47575
47576 - brw_init_test_client();
47577 brw_init_test_service();
47578 rc = sfw_register_test(&brw_test_service, &brw_test_client);
47579 LASSERT (rc == 0);
47580
47581 - ping_init_test_client();
47582 ping_init_test_service();
47583 rc = sfw_register_test(&ping_test_service, &ping_test_client);
47584 LASSERT (rc == 0);
47585 diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
47586 index f0f9194..b589047 100644
47587 --- a/drivers/staging/lustre/lnet/selftest/ping_test.c
47588 +++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
47589 @@ -210,14 +210,12 @@ ping_server_handle(struct srpc_server_rpc *rpc)
47590 return 0;
47591 }
47592
47593 -sfw_test_client_ops_t ping_test_client;
47594 -void ping_init_test_client(void)
47595 -{
47596 - ping_test_client.tso_init = ping_client_init;
47597 - ping_test_client.tso_fini = ping_client_fini;
47598 - ping_test_client.tso_prep_rpc = ping_client_prep_rpc;
47599 - ping_test_client.tso_done_rpc = ping_client_done_rpc;
47600 -}
47601 +sfw_test_client_ops_t ping_test_client = {
47602 + .tso_init = ping_client_init,
47603 + .tso_fini = ping_client_fini,
47604 + .tso_prep_rpc = ping_client_prep_rpc,
47605 + .tso_done_rpc = ping_client_done_rpc,
47606 +};
47607
47608 srpc_service_t ping_test_service;
47609 void ping_init_test_service(void)
47610 diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
47611 index 7020d9c..0d3b580 100644
47612 --- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
47613 +++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
47614 @@ -1141,7 +1141,7 @@ struct ldlm_callback_suite {
47615 ldlm_completion_callback lcs_completion;
47616 ldlm_blocking_callback lcs_blocking;
47617 ldlm_glimpse_callback lcs_glimpse;
47618 -};
47619 +} __no_const;
47620
47621 /* ldlm_lockd.c */
47622 int ldlm_del_waiting_lock(struct ldlm_lock *lock);
47623 diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
47624 index a612255..9a9e2dd 100644
47625 --- a/drivers/staging/lustre/lustre/include/obd.h
47626 +++ b/drivers/staging/lustre/lustre/include/obd.h
47627 @@ -1417,7 +1417,7 @@ struct md_ops {
47628 * lprocfs_alloc_md_stats() in obdclass/lprocfs_status.c. Also, add a
47629 * wrapper function in include/linux/obd_class.h.
47630 */
47631 -};
47632 +} __no_const;
47633
47634 struct lsm_operations {
47635 void (*lsm_free)(struct lov_stripe_md *);
47636 diff --git a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
47637 index fc6c977..df1f956 100644
47638 --- a/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
47639 +++ b/drivers/staging/lustre/lustre/libcfs/linux/linux-proc.c
47640 @@ -219,7 +219,7 @@ DECLARE_PROC_HANDLER(proc_debug_mb)
47641 int LL_PROC_PROTO(proc_console_max_delay_cs)
47642 {
47643 int rc, max_delay_cs;
47644 - ctl_table_t dummy = *table;
47645 + ctl_table_no_const dummy = *table;
47646 cfs_duration_t d;
47647
47648 dummy.data = &max_delay_cs;
47649 @@ -250,7 +250,7 @@ int LL_PROC_PROTO(proc_console_max_delay_cs)
47650 int LL_PROC_PROTO(proc_console_min_delay_cs)
47651 {
47652 int rc, min_delay_cs;
47653 - ctl_table_t dummy = *table;
47654 + ctl_table_no_const dummy = *table;
47655 cfs_duration_t d;
47656
47657 dummy.data = &min_delay_cs;
47658 @@ -281,7 +281,7 @@ int LL_PROC_PROTO(proc_console_min_delay_cs)
47659 int LL_PROC_PROTO(proc_console_backoff)
47660 {
47661 int rc, backoff;
47662 - ctl_table_t dummy = *table;
47663 + ctl_table_no_const dummy = *table;
47664
47665 dummy.data = &backoff;
47666 dummy.proc_handler = &proc_dointvec;
47667 diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
47668 index 3675020..e80d92c 100644
47669 --- a/drivers/staging/media/solo6x10/solo6x10-core.c
47670 +++ b/drivers/staging/media/solo6x10/solo6x10-core.c
47671 @@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
47672
47673 static int solo_sysfs_init(struct solo_dev *solo_dev)
47674 {
47675 - struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
47676 + bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
47677 struct device *dev = &solo_dev->dev;
47678 const char *driver;
47679 int i;
47680 diff --git a/drivers/staging/media/solo6x10/solo6x10-p2m.c b/drivers/staging/media/solo6x10/solo6x10-p2m.c
47681 index 3335941..2b26186 100644
47682 --- a/drivers/staging/media/solo6x10/solo6x10-p2m.c
47683 +++ b/drivers/staging/media/solo6x10/solo6x10-p2m.c
47684 @@ -77,7 +77,7 @@ int solo_p2m_dma_desc(struct solo_dev *solo_dev,
47685
47686 /* Get next ID. According to Softlogic, 6110 has problems on !=0 P2M */
47687 if (solo_dev->type != SOLO_DEV_6110 && multi_p2m) {
47688 - p2m_id = atomic_inc_return(&solo_dev->p2m_count) % SOLO_NR_P2M;
47689 + p2m_id = atomic_inc_return_unchecked(&solo_dev->p2m_count) % SOLO_NR_P2M;
47690 if (p2m_id < 0)
47691 p2m_id = -p2m_id;
47692 }
47693 diff --git a/drivers/staging/media/solo6x10/solo6x10.h b/drivers/staging/media/solo6x10/solo6x10.h
47694 index 6f91d2e..3f011d2 100644
47695 --- a/drivers/staging/media/solo6x10/solo6x10.h
47696 +++ b/drivers/staging/media/solo6x10/solo6x10.h
47697 @@ -238,7 +238,7 @@ struct solo_dev {
47698
47699 /* P2M DMA Engine */
47700 struct solo_p2m_dev p2m_dev[SOLO_NR_P2M];
47701 - atomic_t p2m_count;
47702 + atomic_unchecked_t p2m_count;
47703 int p2m_jiffies;
47704 unsigned int p2m_timeouts;
47705
47706 diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
47707 index e14a1bb..9cb9bbe 100644
47708 --- a/drivers/staging/octeon/ethernet-rx.c
47709 +++ b/drivers/staging/octeon/ethernet-rx.c
47710 @@ -419,11 +419,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
47711 /* Increment RX stats for virtual ports */
47712 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
47713 #ifdef CONFIG_64BIT
47714 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
47715 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
47716 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
47717 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
47718 #else
47719 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
47720 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
47721 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
47722 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
47723 #endif
47724 }
47725 netif_receive_skb(skb);
47726 @@ -434,9 +434,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
47727 dev->name);
47728 */
47729 #ifdef CONFIG_64BIT
47730 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
47731 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
47732 #else
47733 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
47734 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
47735 #endif
47736 dev_kfree_skb_irq(skb);
47737 }
47738 diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
47739 index c3a90e7..023619a 100644
47740 --- a/drivers/staging/octeon/ethernet.c
47741 +++ b/drivers/staging/octeon/ethernet.c
47742 @@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
47743 * since the RX tasklet also increments it.
47744 */
47745 #ifdef CONFIG_64BIT
47746 - atomic64_add(rx_status.dropped_packets,
47747 - (atomic64_t *)&priv->stats.rx_dropped);
47748 + atomic64_add_unchecked(rx_status.dropped_packets,
47749 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
47750 #else
47751 - atomic_add(rx_status.dropped_packets,
47752 - (atomic_t *)&priv->stats.rx_dropped);
47753 + atomic_add_unchecked(rx_status.dropped_packets,
47754 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
47755 #endif
47756 }
47757
47758 diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
47759 index 439c3c9..2d74293 100644
47760 --- a/drivers/staging/rtl8188eu/include/hal_intf.h
47761 +++ b/drivers/staging/rtl8188eu/include/hal_intf.h
47762 @@ -271,7 +271,7 @@ struct hal_ops {
47763 s32 (*c2h_handler)(struct adapter *padapter,
47764 struct c2h_evt_hdr *c2h_evt);
47765 c2h_id_filter c2h_id_filter_ccx;
47766 -};
47767 +} __no_const;
47768
47769 enum rt_eeprom_type {
47770 EEPROM_93C46,
47771 diff --git a/drivers/staging/rtl8188eu/include/rtw_io.h b/drivers/staging/rtl8188eu/include/rtw_io.h
47772 index eb6f0e5..e6a0958 100644
47773 --- a/drivers/staging/rtl8188eu/include/rtw_io.h
47774 +++ b/drivers/staging/rtl8188eu/include/rtw_io.h
47775 @@ -126,7 +126,7 @@ struct _io_ops {
47776 u32 (*_write_scsi)(struct intf_hdl *pintfhdl,u32 cnt, u8 *pmem);
47777 void (*_read_port_cancel)(struct intf_hdl *pintfhdl);
47778 void (*_write_port_cancel)(struct intf_hdl *pintfhdl);
47779 -};
47780 +} __no_const;
47781
47782 struct io_req {
47783 struct list_head list;
47784 diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
47785 index dc23395..cf7e9b1 100644
47786 --- a/drivers/staging/rtl8712/rtl871x_io.h
47787 +++ b/drivers/staging/rtl8712/rtl871x_io.h
47788 @@ -108,7 +108,7 @@ struct _io_ops {
47789 u8 *pmem);
47790 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
47791 u8 *pmem);
47792 -};
47793 +} __no_const;
47794
47795 struct io_req {
47796 struct list_head list;
47797 diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
47798 index 1f5088b..0e59820 100644
47799 --- a/drivers/staging/sbe-2t3e3/netdev.c
47800 +++ b/drivers/staging/sbe-2t3e3/netdev.c
47801 @@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
47802 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
47803
47804 if (rlen)
47805 - if (copy_to_user(data, &resp, rlen))
47806 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
47807 return -EFAULT;
47808
47809 return 0;
47810 diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
47811 index a863a98..d272795 100644
47812 --- a/drivers/staging/usbip/vhci.h
47813 +++ b/drivers/staging/usbip/vhci.h
47814 @@ -83,7 +83,7 @@ struct vhci_hcd {
47815 unsigned resuming:1;
47816 unsigned long re_timeout;
47817
47818 - atomic_t seqnum;
47819 + atomic_unchecked_t seqnum;
47820
47821 /*
47822 * NOTE:
47823 diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
47824 index d7974cb..d78076b 100644
47825 --- a/drivers/staging/usbip/vhci_hcd.c
47826 +++ b/drivers/staging/usbip/vhci_hcd.c
47827 @@ -441,7 +441,7 @@ static void vhci_tx_urb(struct urb *urb)
47828
47829 spin_lock(&vdev->priv_lock);
47830
47831 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
47832 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
47833 if (priv->seqnum == 0xffff)
47834 dev_info(&urb->dev->dev, "seqnum max\n");
47835
47836 @@ -687,7 +687,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
47837 return -ENOMEM;
47838 }
47839
47840 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
47841 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
47842 if (unlink->seqnum == 0xffff)
47843 pr_info("seqnum max\n");
47844
47845 @@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
47846 vdev->rhport = rhport;
47847 }
47848
47849 - atomic_set(&vhci->seqnum, 0);
47850 + atomic_set_unchecked(&vhci->seqnum, 0);
47851 spin_lock_init(&vhci->lock);
47852
47853 hcd->power_budget = 0; /* no limit */
47854 diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
47855 index d07fcb5..358e1e1 100644
47856 --- a/drivers/staging/usbip/vhci_rx.c
47857 +++ b/drivers/staging/usbip/vhci_rx.c
47858 @@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
47859 if (!urb) {
47860 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
47861 pr_info("max seqnum %d\n",
47862 - atomic_read(&the_controller->seqnum));
47863 + atomic_read_unchecked(&the_controller->seqnum));
47864 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
47865 return;
47866 }
47867 diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
47868 index 8acff44..bdb2fca 100644
47869 --- a/drivers/staging/vt6655/hostap.c
47870 +++ b/drivers/staging/vt6655/hostap.c
47871 @@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO;
47872 *
47873 */
47874
47875 +static net_device_ops_no_const apdev_netdev_ops;
47876 +
47877 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
47878 {
47879 PSDevice apdev_priv;
47880 struct net_device *dev = pDevice->dev;
47881 int ret;
47882 - const struct net_device_ops apdev_netdev_ops = {
47883 - .ndo_start_xmit = pDevice->tx_80211,
47884 - };
47885
47886 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
47887
47888 @@ -88,6 +87,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
47889 *apdev_priv = *pDevice;
47890 eth_hw_addr_inherit(pDevice->apdev, dev);
47891
47892 + /* only half broken now */
47893 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
47894 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
47895
47896 pDevice->apdev->type = ARPHRD_IEEE80211;
47897 diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
47898 index c699a30..b90a5fd 100644
47899 --- a/drivers/staging/vt6656/hostap.c
47900 +++ b/drivers/staging/vt6656/hostap.c
47901 @@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
47902 *
47903 */
47904
47905 +static net_device_ops_no_const apdev_netdev_ops;
47906 +
47907 static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
47908 {
47909 struct vnt_private *apdev_priv;
47910 struct net_device *dev = pDevice->dev;
47911 int ret;
47912 - const struct net_device_ops apdev_netdev_ops = {
47913 - .ndo_start_xmit = pDevice->tx_80211,
47914 - };
47915
47916 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
47917
47918 @@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
47919 *apdev_priv = *pDevice;
47920 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
47921
47922 + /* only half broken now */
47923 + apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
47924 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
47925
47926 pDevice->apdev->type = ARPHRD_IEEE80211;
47927 diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
47928 index e51b09a..5ebac31 100644
47929 --- a/drivers/target/sbp/sbp_target.c
47930 +++ b/drivers/target/sbp/sbp_target.c
47931 @@ -62,7 +62,7 @@ static const u32 sbp_unit_directory_template[] = {
47932
47933 #define SESSION_MAINTENANCE_INTERVAL HZ
47934
47935 -static atomic_t login_id = ATOMIC_INIT(0);
47936 +static atomic_unchecked_t login_id = ATOMIC_INIT(0);
47937
47938 static void session_maintenance_work(struct work_struct *);
47939 static int sbp_run_transaction(struct fw_card *, int, int, int, int,
47940 @@ -444,7 +444,7 @@ static void sbp_management_request_login(
47941 login->lun = se_lun;
47942 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
47943 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
47944 - login->login_id = atomic_inc_return(&login_id);
47945 + login->login_id = atomic_inc_return_unchecked(&login_id);
47946
47947 login->tgt_agt = sbp_target_agent_register(login);
47948 if (IS_ERR(login->tgt_agt)) {
47949 diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
47950 index d90dbb0..6cbe585 100644
47951 --- a/drivers/target/target_core_device.c
47952 +++ b/drivers/target/target_core_device.c
47953 @@ -1431,7 +1431,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
47954 spin_lock_init(&dev->se_tmr_lock);
47955 spin_lock_init(&dev->qf_cmd_lock);
47956 sema_init(&dev->caw_sem, 1);
47957 - atomic_set(&dev->dev_ordered_id, 0);
47958 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
47959 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
47960 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
47961 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
47962 diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
47963 index 0b0009b..215e88e 100644
47964 --- a/drivers/target/target_core_transport.c
47965 +++ b/drivers/target/target_core_transport.c
47966 @@ -1137,7 +1137,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
47967 * Used to determine when ORDERED commands should go from
47968 * Dormant to Active status.
47969 */
47970 - cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
47971 + cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
47972 smp_mb__after_atomic_inc();
47973 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
47974 cmd->se_ordered_id, cmd->sam_task_attr,
47975 diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
47976 index 33f83fe..d80f8e1 100644
47977 --- a/drivers/tty/cyclades.c
47978 +++ b/drivers/tty/cyclades.c
47979 @@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
47980 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
47981 info->port.count);
47982 #endif
47983 - info->port.count++;
47984 + atomic_inc(&info->port.count);
47985 #ifdef CY_DEBUG_COUNT
47986 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
47987 - current->pid, info->port.count);
47988 + current->pid, atomic_read(&info->port.count));
47989 #endif
47990
47991 /*
47992 @@ -3972,7 +3972,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
47993 for (j = 0; j < cy_card[i].nports; j++) {
47994 info = &cy_card[i].ports[j];
47995
47996 - if (info->port.count) {
47997 + if (atomic_read(&info->port.count)) {
47998 /* XXX is the ldisc num worth this? */
47999 struct tty_struct *tty;
48000 struct tty_ldisc *ld;
48001 diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
48002 index 9eba119..5070303 100644
48003 --- a/drivers/tty/hvc/hvc_console.c
48004 +++ b/drivers/tty/hvc/hvc_console.c
48005 @@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
48006
48007 spin_lock_irqsave(&hp->port.lock, flags);
48008 /* Check and then increment for fast path open. */
48009 - if (hp->port.count++ > 0) {
48010 + if (atomic_inc_return(&hp->port.count) > 1) {
48011 spin_unlock_irqrestore(&hp->port.lock, flags);
48012 hvc_kick();
48013 return 0;
48014 @@ -393,7 +393,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
48015
48016 spin_lock_irqsave(&hp->port.lock, flags);
48017
48018 - if (--hp->port.count == 0) {
48019 + if (atomic_dec_return(&hp->port.count) == 0) {
48020 spin_unlock_irqrestore(&hp->port.lock, flags);
48021 /* We are done with the tty pointer now. */
48022 tty_port_tty_set(&hp->port, NULL);
48023 @@ -415,9 +415,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
48024 */
48025 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
48026 } else {
48027 - if (hp->port.count < 0)
48028 + if (atomic_read(&hp->port.count) < 0)
48029 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
48030 - hp->vtermno, hp->port.count);
48031 + hp->vtermno, atomic_read(&hp->port.count));
48032 spin_unlock_irqrestore(&hp->port.lock, flags);
48033 }
48034 }
48035 @@ -447,12 +447,12 @@ static void hvc_hangup(struct tty_struct *tty)
48036 * open->hangup case this can be called after the final close so prevent
48037 * that from happening for now.
48038 */
48039 - if (hp->port.count <= 0) {
48040 + if (atomic_read(&hp->port.count) <= 0) {
48041 spin_unlock_irqrestore(&hp->port.lock, flags);
48042 return;
48043 }
48044
48045 - hp->port.count = 0;
48046 + atomic_set(&hp->port.count, 0);
48047 spin_unlock_irqrestore(&hp->port.lock, flags);
48048 tty_port_tty_set(&hp->port, NULL);
48049
48050 @@ -500,7 +500,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
48051 return -EPIPE;
48052
48053 /* FIXME what's this (unprotected) check for? */
48054 - if (hp->port.count <= 0)
48055 + if (atomic_read(&hp->port.count) <= 0)
48056 return -EIO;
48057
48058 spin_lock_irqsave(&hp->lock, flags);
48059 diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
48060 index 81e939e..95ead10 100644
48061 --- a/drivers/tty/hvc/hvcs.c
48062 +++ b/drivers/tty/hvc/hvcs.c
48063 @@ -83,6 +83,7 @@
48064 #include <asm/hvcserver.h>
48065 #include <asm/uaccess.h>
48066 #include <asm/vio.h>
48067 +#include <asm/local.h>
48068
48069 /*
48070 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
48071 @@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
48072
48073 spin_lock_irqsave(&hvcsd->lock, flags);
48074
48075 - if (hvcsd->port.count > 0) {
48076 + if (atomic_read(&hvcsd->port.count) > 0) {
48077 spin_unlock_irqrestore(&hvcsd->lock, flags);
48078 printk(KERN_INFO "HVCS: vterm state unchanged. "
48079 "The hvcs device node is still in use.\n");
48080 @@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
48081 }
48082 }
48083
48084 - hvcsd->port.count = 0;
48085 + atomic_set(&hvcsd->port.count, 0);
48086 hvcsd->port.tty = tty;
48087 tty->driver_data = hvcsd;
48088
48089 @@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
48090 unsigned long flags;
48091
48092 spin_lock_irqsave(&hvcsd->lock, flags);
48093 - hvcsd->port.count++;
48094 + atomic_inc(&hvcsd->port.count);
48095 hvcsd->todo_mask |= HVCS_SCHED_READ;
48096 spin_unlock_irqrestore(&hvcsd->lock, flags);
48097
48098 @@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
48099 hvcsd = tty->driver_data;
48100
48101 spin_lock_irqsave(&hvcsd->lock, flags);
48102 - if (--hvcsd->port.count == 0) {
48103 + if (atomic_dec_and_test(&hvcsd->port.count)) {
48104
48105 vio_disable_interrupts(hvcsd->vdev);
48106
48107 @@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
48108
48109 free_irq(irq, hvcsd);
48110 return;
48111 - } else if (hvcsd->port.count < 0) {
48112 + } else if (atomic_read(&hvcsd->port.count) < 0) {
48113 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
48114 " is missmanaged.\n",
48115 - hvcsd->vdev->unit_address, hvcsd->port.count);
48116 + hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
48117 }
48118
48119 spin_unlock_irqrestore(&hvcsd->lock, flags);
48120 @@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
48121
48122 spin_lock_irqsave(&hvcsd->lock, flags);
48123 /* Preserve this so that we know how many kref refs to put */
48124 - temp_open_count = hvcsd->port.count;
48125 + temp_open_count = atomic_read(&hvcsd->port.count);
48126
48127 /*
48128 * Don't kref put inside the spinlock because the destruction
48129 @@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
48130 tty->driver_data = NULL;
48131 hvcsd->port.tty = NULL;
48132
48133 - hvcsd->port.count = 0;
48134 + atomic_set(&hvcsd->port.count, 0);
48135
48136 /* This will drop any buffered data on the floor which is OK in a hangup
48137 * scenario. */
48138 @@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
48139 * the middle of a write operation? This is a crummy place to do this
48140 * but we want to keep it all in the spinlock.
48141 */
48142 - if (hvcsd->port.count <= 0) {
48143 + if (atomic_read(&hvcsd->port.count) <= 0) {
48144 spin_unlock_irqrestore(&hvcsd->lock, flags);
48145 return -ENODEV;
48146 }
48147 @@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
48148 {
48149 struct hvcs_struct *hvcsd = tty->driver_data;
48150
48151 - if (!hvcsd || hvcsd->port.count <= 0)
48152 + if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
48153 return 0;
48154
48155 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
48156 diff --git a/drivers/tty/hvc/hvsi.c b/drivers/tty/hvc/hvsi.c
48157 index 4190199..48f2920 100644
48158 --- a/drivers/tty/hvc/hvsi.c
48159 +++ b/drivers/tty/hvc/hvsi.c
48160 @@ -85,7 +85,7 @@ struct hvsi_struct {
48161 int n_outbuf;
48162 uint32_t vtermno;
48163 uint32_t virq;
48164 - atomic_t seqno; /* HVSI packet sequence number */
48165 + atomic_unchecked_t seqno; /* HVSI packet sequence number */
48166 uint16_t mctrl;
48167 uint8_t state; /* HVSI protocol state */
48168 uint8_t flags;
48169 @@ -295,7 +295,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno)
48170
48171 packet.hdr.type = VS_QUERY_RESPONSE_PACKET_HEADER;
48172 packet.hdr.len = sizeof(struct hvsi_query_response);
48173 - packet.hdr.seqno = atomic_inc_return(&hp->seqno);
48174 + packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
48175 packet.verb = VSV_SEND_VERSION_NUMBER;
48176 packet.u.version = HVSI_VERSION;
48177 packet.query_seqno = query_seqno+1;
48178 @@ -555,7 +555,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb)
48179
48180 packet.hdr.type = VS_QUERY_PACKET_HEADER;
48181 packet.hdr.len = sizeof(struct hvsi_query);
48182 - packet.hdr.seqno = atomic_inc_return(&hp->seqno);
48183 + packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
48184 packet.verb = verb;
48185
48186 pr_debug("%s: sending %i bytes\n", __func__, packet.hdr.len);
48187 @@ -597,7 +597,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl)
48188 int wrote;
48189
48190 packet.hdr.type = VS_CONTROL_PACKET_HEADER,
48191 - packet.hdr.seqno = atomic_inc_return(&hp->seqno);
48192 + packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
48193 packet.hdr.len = sizeof(struct hvsi_control);
48194 packet.verb = VSV_SET_MODEM_CTL;
48195 packet.mask = HVSI_TSDTR;
48196 @@ -680,7 +680,7 @@ static int hvsi_put_chars(struct hvsi_struct *hp, const char *buf, int count)
48197 BUG_ON(count > HVSI_MAX_OUTGOING_DATA);
48198
48199 packet.hdr.type = VS_DATA_PACKET_HEADER;
48200 - packet.hdr.seqno = atomic_inc_return(&hp->seqno);
48201 + packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
48202 packet.hdr.len = count + sizeof(struct hvsi_header);
48203 memcpy(&packet.data, buf, count);
48204
48205 @@ -697,7 +697,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp)
48206 struct hvsi_control packet __ALIGNED__;
48207
48208 packet.hdr.type = VS_CONTROL_PACKET_HEADER;
48209 - packet.hdr.seqno = atomic_inc_return(&hp->seqno);
48210 + packet.hdr.seqno = atomic_inc_return_unchecked(&hp->seqno);
48211 packet.hdr.len = 6;
48212 packet.verb = VSV_CLOSE_PROTOCOL;
48213
48214 diff --git a/drivers/tty/hvc/hvsi_lib.c b/drivers/tty/hvc/hvsi_lib.c
48215 index ac27671..0f627ee 100644
48216 --- a/drivers/tty/hvc/hvsi_lib.c
48217 +++ b/drivers/tty/hvc/hvsi_lib.c
48218 @@ -9,7 +9,7 @@
48219
48220 static int hvsi_send_packet(struct hvsi_priv *pv, struct hvsi_header *packet)
48221 {
48222 - packet->seqno = atomic_inc_return(&pv->seqno);
48223 + packet->seqno = atomic_inc_return_unchecked(&pv->seqno);
48224
48225 /* Assumes that always succeeds, works in practice */
48226 return pv->put_chars(pv->termno, (char *)packet, packet->len);
48227 @@ -21,7 +21,7 @@ static void hvsi_start_handshake(struct hvsi_priv *pv)
48228
48229 /* Reset state */
48230 pv->established = 0;
48231 - atomic_set(&pv->seqno, 0);
48232 + atomic_set_unchecked(&pv->seqno, 0);
48233
48234 pr_devel("HVSI@%x: Handshaking started\n", pv->termno);
48235
48236 @@ -265,7 +265,7 @@ int hvsilib_read_mctrl(struct hvsi_priv *pv)
48237 pv->mctrl_update = 0;
48238 q.hdr.type = VS_QUERY_PACKET_HEADER;
48239 q.hdr.len = sizeof(struct hvsi_query);
48240 - q.hdr.seqno = atomic_inc_return(&pv->seqno);
48241 + q.hdr.seqno = atomic_inc_return_unchecked(&pv->seqno);
48242 q.verb = VSV_SEND_MODEM_CTL_STATUS;
48243 rc = hvsi_send_packet(pv, &q.hdr);
48244 if (rc <= 0) {
48245 diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
48246 index 8fd72ff..34a0bed 100644
48247 --- a/drivers/tty/ipwireless/tty.c
48248 +++ b/drivers/tty/ipwireless/tty.c
48249 @@ -29,6 +29,7 @@
48250 #include <linux/tty_driver.h>
48251 #include <linux/tty_flip.h>
48252 #include <linux/uaccess.h>
48253 +#include <asm/local.h>
48254
48255 #include "tty.h"
48256 #include "network.h"
48257 @@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
48258 mutex_unlock(&tty->ipw_tty_mutex);
48259 return -ENODEV;
48260 }
48261 - if (tty->port.count == 0)
48262 + if (atomic_read(&tty->port.count) == 0)
48263 tty->tx_bytes_queued = 0;
48264
48265 - tty->port.count++;
48266 + atomic_inc(&tty->port.count);
48267
48268 tty->port.tty = linux_tty;
48269 linux_tty->driver_data = tty;
48270 @@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
48271
48272 static void do_ipw_close(struct ipw_tty *tty)
48273 {
48274 - tty->port.count--;
48275 -
48276 - if (tty->port.count == 0) {
48277 + if (atomic_dec_return(&tty->port.count) == 0) {
48278 struct tty_struct *linux_tty = tty->port.tty;
48279
48280 if (linux_tty != NULL) {
48281 @@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
48282 return;
48283
48284 mutex_lock(&tty->ipw_tty_mutex);
48285 - if (tty->port.count == 0) {
48286 + if (atomic_read(&tty->port.count) == 0) {
48287 mutex_unlock(&tty->ipw_tty_mutex);
48288 return;
48289 }
48290 @@ -164,7 +163,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
48291
48292 mutex_lock(&tty->ipw_tty_mutex);
48293
48294 - if (!tty->port.count) {
48295 + if (!atomic_read(&tty->port.count)) {
48296 mutex_unlock(&tty->ipw_tty_mutex);
48297 return;
48298 }
48299 @@ -206,7 +205,7 @@ static int ipw_write(struct tty_struct *linux_tty,
48300 return -ENODEV;
48301
48302 mutex_lock(&tty->ipw_tty_mutex);
48303 - if (!tty->port.count) {
48304 + if (!atomic_read(&tty->port.count)) {
48305 mutex_unlock(&tty->ipw_tty_mutex);
48306 return -EINVAL;
48307 }
48308 @@ -246,7 +245,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
48309 if (!tty)
48310 return -ENODEV;
48311
48312 - if (!tty->port.count)
48313 + if (!atomic_read(&tty->port.count))
48314 return -EINVAL;
48315
48316 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
48317 @@ -288,7 +287,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
48318 if (!tty)
48319 return 0;
48320
48321 - if (!tty->port.count)
48322 + if (!atomic_read(&tty->port.count))
48323 return 0;
48324
48325 return tty->tx_bytes_queued;
48326 @@ -369,7 +368,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
48327 if (!tty)
48328 return -ENODEV;
48329
48330 - if (!tty->port.count)
48331 + if (!atomic_read(&tty->port.count))
48332 return -EINVAL;
48333
48334 return get_control_lines(tty);
48335 @@ -385,7 +384,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
48336 if (!tty)
48337 return -ENODEV;
48338
48339 - if (!tty->port.count)
48340 + if (!atomic_read(&tty->port.count))
48341 return -EINVAL;
48342
48343 return set_control_lines(tty, set, clear);
48344 @@ -399,7 +398,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
48345 if (!tty)
48346 return -ENODEV;
48347
48348 - if (!tty->port.count)
48349 + if (!atomic_read(&tty->port.count))
48350 return -EINVAL;
48351
48352 /* FIXME: Exactly how is the tty object locked here .. */
48353 @@ -555,7 +554,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
48354 * are gone */
48355 mutex_lock(&ttyj->ipw_tty_mutex);
48356 }
48357 - while (ttyj->port.count)
48358 + while (atomic_read(&ttyj->port.count))
48359 do_ipw_close(ttyj);
48360 ipwireless_disassociate_network_ttys(network,
48361 ttyj->channel_idx);
48362 diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
48363 index 1deaca4..c8582d4 100644
48364 --- a/drivers/tty/moxa.c
48365 +++ b/drivers/tty/moxa.c
48366 @@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
48367 }
48368
48369 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
48370 - ch->port.count++;
48371 + atomic_inc(&ch->port.count);
48372 tty->driver_data = ch;
48373 tty_port_tty_set(&ch->port, tty);
48374 mutex_lock(&ch->port.mutex);
48375 diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
48376 index c0f76da..d974c32 100644
48377 --- a/drivers/tty/n_gsm.c
48378 +++ b/drivers/tty/n_gsm.c
48379 @@ -1632,7 +1632,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
48380 spin_lock_init(&dlci->lock);
48381 mutex_init(&dlci->mutex);
48382 dlci->fifo = &dlci->_fifo;
48383 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
48384 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
48385 kfree(dlci);
48386 return NULL;
48387 }
48388 @@ -2935,7 +2935,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
48389 struct gsm_dlci *dlci = tty->driver_data;
48390 struct tty_port *port = &dlci->port;
48391
48392 - port->count++;
48393 + atomic_inc(&port->count);
48394 dlci_get(dlci);
48395 dlci_get(dlci->gsm->dlci[0]);
48396 mux_get(dlci->gsm);
48397 diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
48398 index 4d6f430..0810fa9 100644
48399 --- a/drivers/tty/n_tty.c
48400 +++ b/drivers/tty/n_tty.c
48401 @@ -2504,6 +2504,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
48402 {
48403 *ops = tty_ldisc_N_TTY;
48404 ops->owner = NULL;
48405 - ops->refcount = ops->flags = 0;
48406 + atomic_set(&ops->refcount, 0);
48407 + ops->flags = 0;
48408 }
48409 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
48410 diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
48411 index 25c9bc7..24077b7 100644
48412 --- a/drivers/tty/pty.c
48413 +++ b/drivers/tty/pty.c
48414 @@ -790,8 +790,10 @@ static void __init unix98_pty_init(void)
48415 panic("Couldn't register Unix98 pts driver");
48416
48417 /* Now create the /dev/ptmx special device */
48418 + pax_open_kernel();
48419 tty_default_fops(&ptmx_fops);
48420 - ptmx_fops.open = ptmx_open;
48421 + *(void **)&ptmx_fops.open = ptmx_open;
48422 + pax_close_kernel();
48423
48424 cdev_init(&ptmx_cdev, &ptmx_fops);
48425 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
48426 diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
48427 index 354564e..fe50d9a 100644
48428 --- a/drivers/tty/rocket.c
48429 +++ b/drivers/tty/rocket.c
48430 @@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
48431 tty->driver_data = info;
48432 tty_port_tty_set(port, tty);
48433
48434 - if (port->count++ == 0) {
48435 + if (atomic_inc_return(&port->count) == 1) {
48436 atomic_inc(&rp_num_ports_open);
48437
48438 #ifdef ROCKET_DEBUG_OPEN
48439 @@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
48440 #endif
48441 }
48442 #ifdef ROCKET_DEBUG_OPEN
48443 - printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
48444 + printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
48445 #endif
48446
48447 /*
48448 @@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
48449 spin_unlock_irqrestore(&info->port.lock, flags);
48450 return;
48451 }
48452 - if (info->port.count)
48453 + if (atomic_read(&info->port.count))
48454 atomic_dec(&rp_num_ports_open);
48455 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
48456 spin_unlock_irqrestore(&info->port.lock, flags);
48457 diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c
48458 index 1274499..f541382 100644
48459 --- a/drivers/tty/serial/ioc4_serial.c
48460 +++ b/drivers/tty/serial/ioc4_serial.c
48461 @@ -437,7 +437,7 @@ struct ioc4_soft {
48462 } is_intr_info[MAX_IOC4_INTR_ENTS];
48463
48464 /* Number of entries active in the above array */
48465 - atomic_t is_num_intrs;
48466 + atomic_unchecked_t is_num_intrs;
48467 } is_intr_type[IOC4_NUM_INTR_TYPES];
48468
48469 /* is_ir_lock must be held while
48470 @@ -974,7 +974,7 @@ intr_connect(struct ioc4_soft *soft, int type,
48471 BUG_ON(!((type == IOC4_SIO_INTR_TYPE)
48472 || (type == IOC4_OTHER_INTR_TYPE)));
48473
48474 - i = atomic_inc_return(&soft-> is_intr_type[type].is_num_intrs) - 1;
48475 + i = atomic_inc_return_unchecked(&soft-> is_intr_type[type].is_num_intrs) - 1;
48476 BUG_ON(!(i < MAX_IOC4_INTR_ENTS || (printk("i %d\n", i), 0)));
48477
48478 /* Save off the lower level interrupt handler */
48479 @@ -1001,7 +1001,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg)
48480
48481 soft = arg;
48482 for (intr_type = 0; intr_type < IOC4_NUM_INTR_TYPES; intr_type++) {
48483 - num_intrs = (int)atomic_read(
48484 + num_intrs = (int)atomic_read_unchecked(
48485 &soft->is_intr_type[intr_type].is_num_intrs);
48486
48487 this_mir = this_ir = pending_intrs(soft, intr_type);
48488 diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
48489 index a260cde..6b2b5ce 100644
48490 --- a/drivers/tty/serial/kgdboc.c
48491 +++ b/drivers/tty/serial/kgdboc.c
48492 @@ -24,8 +24,9 @@
48493 #define MAX_CONFIG_LEN 40
48494
48495 static struct kgdb_io kgdboc_io_ops;
48496 +static struct kgdb_io kgdboc_io_ops_console;
48497
48498 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
48499 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
48500 static int configured = -1;
48501
48502 static char config[MAX_CONFIG_LEN];
48503 @@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
48504 kgdboc_unregister_kbd();
48505 if (configured == 1)
48506 kgdb_unregister_io_module(&kgdboc_io_ops);
48507 + else if (configured == 2)
48508 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
48509 }
48510
48511 static int configure_kgdboc(void)
48512 @@ -160,13 +163,13 @@ static int configure_kgdboc(void)
48513 int err;
48514 char *cptr = config;
48515 struct console *cons;
48516 + int is_console = 0;
48517
48518 err = kgdboc_option_setup(config);
48519 if (err || !strlen(config) || isspace(config[0]))
48520 goto noconfig;
48521
48522 err = -ENODEV;
48523 - kgdboc_io_ops.is_console = 0;
48524 kgdb_tty_driver = NULL;
48525
48526 kgdboc_use_kms = 0;
48527 @@ -187,7 +190,7 @@ static int configure_kgdboc(void)
48528 int idx;
48529 if (cons->device && cons->device(cons, &idx) == p &&
48530 idx == tty_line) {
48531 - kgdboc_io_ops.is_console = 1;
48532 + is_console = 1;
48533 break;
48534 }
48535 cons = cons->next;
48536 @@ -197,7 +200,13 @@ static int configure_kgdboc(void)
48537 kgdb_tty_line = tty_line;
48538
48539 do_register:
48540 - err = kgdb_register_io_module(&kgdboc_io_ops);
48541 + if (is_console) {
48542 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
48543 + configured = 2;
48544 + } else {
48545 + err = kgdb_register_io_module(&kgdboc_io_ops);
48546 + configured = 1;
48547 + }
48548 if (err)
48549 goto noconfig;
48550
48551 @@ -205,8 +214,6 @@ do_register:
48552 if (err)
48553 goto nmi_con_failed;
48554
48555 - configured = 1;
48556 -
48557 return 0;
48558
48559 nmi_con_failed:
48560 @@ -223,7 +230,7 @@ noconfig:
48561 static int __init init_kgdboc(void)
48562 {
48563 /* Already configured? */
48564 - if (configured == 1)
48565 + if (configured >= 1)
48566 return 0;
48567
48568 return configure_kgdboc();
48569 @@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
48570 if (config[len - 1] == '\n')
48571 config[len - 1] = '\0';
48572
48573 - if (configured == 1)
48574 + if (configured >= 1)
48575 cleanup_kgdboc();
48576
48577 /* Go and configure with the new params. */
48578 @@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
48579 .post_exception = kgdboc_post_exp_handler,
48580 };
48581
48582 +static struct kgdb_io kgdboc_io_ops_console = {
48583 + .name = "kgdboc",
48584 + .read_char = kgdboc_get_char,
48585 + .write_char = kgdboc_put_char,
48586 + .pre_exception = kgdboc_pre_exp_handler,
48587 + .post_exception = kgdboc_post_exp_handler,
48588 + .is_console = 1
48589 +};
48590 +
48591 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
48592 /* This is only available if kgdboc is a built in for early debugging */
48593 static int __init kgdboc_early_init(char *opt)
48594 diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
48595 index b5d779c..3622cfe 100644
48596 --- a/drivers/tty/serial/msm_serial.c
48597 +++ b/drivers/tty/serial/msm_serial.c
48598 @@ -897,7 +897,7 @@ static struct uart_driver msm_uart_driver = {
48599 .cons = MSM_CONSOLE,
48600 };
48601
48602 -static atomic_t msm_uart_next_id = ATOMIC_INIT(0);
48603 +static atomic_unchecked_t msm_uart_next_id = ATOMIC_INIT(0);
48604
48605 static const struct of_device_id msm_uartdm_table[] = {
48606 { .compatible = "qcom,msm-uartdm" },
48607 @@ -912,7 +912,7 @@ static int __init msm_serial_probe(struct platform_device *pdev)
48608 int irq;
48609
48610 if (pdev->id == -1)
48611 - pdev->id = atomic_inc_return(&msm_uart_next_id) - 1;
48612 + pdev->id = atomic_inc_return_unchecked(&msm_uart_next_id) - 1;
48613
48614 if (unlikely(pdev->id < 0 || pdev->id >= UART_NR))
48615 return -ENXIO;
48616 diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
48617 index f3dfa19..342f2ff 100644
48618 --- a/drivers/tty/serial/samsung.c
48619 +++ b/drivers/tty/serial/samsung.c
48620 @@ -456,11 +456,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
48621 }
48622 }
48623
48624 +static int s3c64xx_serial_startup(struct uart_port *port);
48625 static int s3c24xx_serial_startup(struct uart_port *port)
48626 {
48627 struct s3c24xx_uart_port *ourport = to_ourport(port);
48628 int ret;
48629
48630 + /* Startup sequence is different for s3c64xx and higher SoC's */
48631 + if (s3c24xx_serial_has_interrupt_mask(port))
48632 + return s3c64xx_serial_startup(port);
48633 +
48634 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
48635 port->mapbase, port->membase);
48636
48637 @@ -1127,10 +1132,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
48638 /* setup info for port */
48639 port->dev = &platdev->dev;
48640
48641 - /* Startup sequence is different for s3c64xx and higher SoC's */
48642 - if (s3c24xx_serial_has_interrupt_mask(port))
48643 - s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
48644 -
48645 port->uartclk = 1;
48646
48647 if (cfg->uart_flags & UPF_CONS_FLOW) {
48648 diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
48649 index 0f02351..07c59c5 100644
48650 --- a/drivers/tty/serial/serial_core.c
48651 +++ b/drivers/tty/serial/serial_core.c
48652 @@ -1448,7 +1448,7 @@ static void uart_hangup(struct tty_struct *tty)
48653 uart_flush_buffer(tty);
48654 uart_shutdown(tty, state);
48655 spin_lock_irqsave(&port->lock, flags);
48656 - port->count = 0;
48657 + atomic_set(&port->count, 0);
48658 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
48659 spin_unlock_irqrestore(&port->lock, flags);
48660 tty_port_tty_set(port, NULL);
48661 @@ -1544,7 +1544,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
48662 goto end;
48663 }
48664
48665 - port->count++;
48666 + atomic_inc(&port->count);
48667 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
48668 retval = -ENXIO;
48669 goto err_dec_count;
48670 @@ -1572,7 +1572,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
48671 /*
48672 * Make sure the device is in D0 state.
48673 */
48674 - if (port->count == 1)
48675 + if (atomic_read(&port->count) == 1)
48676 uart_change_pm(state, UART_PM_STATE_ON);
48677
48678 /*
48679 @@ -1590,7 +1590,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
48680 end:
48681 return retval;
48682 err_dec_count:
48683 - port->count--;
48684 + atomic_dec(&port->count);
48685 mutex_unlock(&port->mutex);
48686 goto end;
48687 }
48688 diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
48689 index e1ce141..6d4ed80 100644
48690 --- a/drivers/tty/synclink.c
48691 +++ b/drivers/tty/synclink.c
48692 @@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
48693
48694 if (debug_level >= DEBUG_LEVEL_INFO)
48695 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
48696 - __FILE__,__LINE__, info->device_name, info->port.count);
48697 + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
48698
48699 if (tty_port_close_start(&info->port, tty, filp) == 0)
48700 goto cleanup;
48701 @@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
48702 cleanup:
48703 if (debug_level >= DEBUG_LEVEL_INFO)
48704 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
48705 - tty->driver->name, info->port.count);
48706 + tty->driver->name, atomic_read(&info->port.count));
48707
48708 } /* end of mgsl_close() */
48709
48710 @@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
48711
48712 mgsl_flush_buffer(tty);
48713 shutdown(info);
48714 -
48715 - info->port.count = 0;
48716 +
48717 + atomic_set(&info->port.count, 0);
48718 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
48719 info->port.tty = NULL;
48720
48721 @@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
48722
48723 if (debug_level >= DEBUG_LEVEL_INFO)
48724 printk("%s(%d):block_til_ready before block on %s count=%d\n",
48725 - __FILE__,__LINE__, tty->driver->name, port->count );
48726 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
48727
48728 spin_lock_irqsave(&info->irq_spinlock, flags);
48729 if (!tty_hung_up_p(filp)) {
48730 extra_count = true;
48731 - port->count--;
48732 + atomic_dec(&port->count);
48733 }
48734 spin_unlock_irqrestore(&info->irq_spinlock, flags);
48735 port->blocked_open++;
48736 @@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
48737
48738 if (debug_level >= DEBUG_LEVEL_INFO)
48739 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
48740 - __FILE__,__LINE__, tty->driver->name, port->count );
48741 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
48742
48743 tty_unlock(tty);
48744 schedule();
48745 @@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
48746
48747 /* FIXME: Racy on hangup during close wait */
48748 if (extra_count)
48749 - port->count++;
48750 + atomic_inc(&port->count);
48751 port->blocked_open--;
48752
48753 if (debug_level >= DEBUG_LEVEL_INFO)
48754 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
48755 - __FILE__,__LINE__, tty->driver->name, port->count );
48756 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
48757
48758 if (!retval)
48759 port->flags |= ASYNC_NORMAL_ACTIVE;
48760 @@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
48761
48762 if (debug_level >= DEBUG_LEVEL_INFO)
48763 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
48764 - __FILE__,__LINE__,tty->driver->name, info->port.count);
48765 + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
48766
48767 /* If port is closing, signal caller to try again */
48768 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
48769 @@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
48770 spin_unlock_irqrestore(&info->netlock, flags);
48771 goto cleanup;
48772 }
48773 - info->port.count++;
48774 + atomic_inc(&info->port.count);
48775 spin_unlock_irqrestore(&info->netlock, flags);
48776
48777 - if (info->port.count == 1) {
48778 + if (atomic_read(&info->port.count) == 1) {
48779 /* 1st open on this device, init hardware */
48780 retval = startup(info);
48781 if (retval < 0)
48782 @@ -3446,8 +3446,8 @@ cleanup:
48783 if (retval) {
48784 if (tty->count == 1)
48785 info->port.tty = NULL; /* tty layer will release tty struct */
48786 - if(info->port.count)
48787 - info->port.count--;
48788 + if (atomic_read(&info->port.count))
48789 + atomic_dec(&info->port.count);
48790 }
48791
48792 return retval;
48793 @@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
48794 unsigned short new_crctype;
48795
48796 /* return error if TTY interface open */
48797 - if (info->port.count)
48798 + if (atomic_read(&info->port.count))
48799 return -EBUSY;
48800
48801 switch (encoding)
48802 @@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
48803
48804 /* arbitrate between network and tty opens */
48805 spin_lock_irqsave(&info->netlock, flags);
48806 - if (info->port.count != 0 || info->netcount != 0) {
48807 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
48808 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
48809 spin_unlock_irqrestore(&info->netlock, flags);
48810 return -EBUSY;
48811 @@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48812 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
48813
48814 /* return error if TTY interface open */
48815 - if (info->port.count)
48816 + if (atomic_read(&info->port.count))
48817 return -EBUSY;
48818
48819 if (cmd != SIOCWANDEV)
48820 diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
48821 index 1abf946..1ee34fc 100644
48822 --- a/drivers/tty/synclink_gt.c
48823 +++ b/drivers/tty/synclink_gt.c
48824 @@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
48825 tty->driver_data = info;
48826 info->port.tty = tty;
48827
48828 - DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
48829 + DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
48830
48831 /* If port is closing, signal caller to try again */
48832 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
48833 @@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
48834 mutex_unlock(&info->port.mutex);
48835 goto cleanup;
48836 }
48837 - info->port.count++;
48838 + atomic_inc(&info->port.count);
48839 spin_unlock_irqrestore(&info->netlock, flags);
48840
48841 - if (info->port.count == 1) {
48842 + if (atomic_read(&info->port.count) == 1) {
48843 /* 1st open on this device, init hardware */
48844 retval = startup(info);
48845 if (retval < 0) {
48846 @@ -715,8 +715,8 @@ cleanup:
48847 if (retval) {
48848 if (tty->count == 1)
48849 info->port.tty = NULL; /* tty layer will release tty struct */
48850 - if(info->port.count)
48851 - info->port.count--;
48852 + if(atomic_read(&info->port.count))
48853 + atomic_dec(&info->port.count);
48854 }
48855
48856 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
48857 @@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
48858
48859 if (sanity_check(info, tty->name, "close"))
48860 return;
48861 - DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
48862 + DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
48863
48864 if (tty_port_close_start(&info->port, tty, filp) == 0)
48865 goto cleanup;
48866 @@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
48867 tty_port_close_end(&info->port, tty);
48868 info->port.tty = NULL;
48869 cleanup:
48870 - DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
48871 + DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
48872 }
48873
48874 static void hangup(struct tty_struct *tty)
48875 @@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
48876 shutdown(info);
48877
48878 spin_lock_irqsave(&info->port.lock, flags);
48879 - info->port.count = 0;
48880 + atomic_set(&info->port.count, 0);
48881 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
48882 info->port.tty = NULL;
48883 spin_unlock_irqrestore(&info->port.lock, flags);
48884 @@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
48885 unsigned short new_crctype;
48886
48887 /* return error if TTY interface open */
48888 - if (info->port.count)
48889 + if (atomic_read(&info->port.count))
48890 return -EBUSY;
48891
48892 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
48893 @@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
48894
48895 /* arbitrate between network and tty opens */
48896 spin_lock_irqsave(&info->netlock, flags);
48897 - if (info->port.count != 0 || info->netcount != 0) {
48898 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
48899 DBGINFO(("%s hdlc_open busy\n", dev->name));
48900 spin_unlock_irqrestore(&info->netlock, flags);
48901 return -EBUSY;
48902 @@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
48903 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
48904
48905 /* return error if TTY interface open */
48906 - if (info->port.count)
48907 + if (atomic_read(&info->port.count))
48908 return -EBUSY;
48909
48910 if (cmd != SIOCWANDEV)
48911 @@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
48912 if (port == NULL)
48913 continue;
48914 spin_lock(&port->lock);
48915 - if ((port->port.count || port->netcount) &&
48916 + if ((atomic_read(&port->port.count) || port->netcount) &&
48917 port->pending_bh && !port->bh_running &&
48918 !port->bh_requested) {
48919 DBGISR(("%s bh queued\n", port->device_name));
48920 @@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
48921 spin_lock_irqsave(&info->lock, flags);
48922 if (!tty_hung_up_p(filp)) {
48923 extra_count = true;
48924 - port->count--;
48925 + atomic_dec(&port->count);
48926 }
48927 spin_unlock_irqrestore(&info->lock, flags);
48928 port->blocked_open++;
48929 @@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
48930 remove_wait_queue(&port->open_wait, &wait);
48931
48932 if (extra_count)
48933 - port->count++;
48934 + atomic_inc(&port->count);
48935 port->blocked_open--;
48936
48937 if (!retval)
48938 diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
48939 index dc6e969..5dc8786 100644
48940 --- a/drivers/tty/synclinkmp.c
48941 +++ b/drivers/tty/synclinkmp.c
48942 @@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
48943
48944 if (debug_level >= DEBUG_LEVEL_INFO)
48945 printk("%s(%d):%s open(), old ref count = %d\n",
48946 - __FILE__,__LINE__,tty->driver->name, info->port.count);
48947 + __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
48948
48949 /* If port is closing, signal caller to try again */
48950 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
48951 @@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
48952 spin_unlock_irqrestore(&info->netlock, flags);
48953 goto cleanup;
48954 }
48955 - info->port.count++;
48956 + atomic_inc(&info->port.count);
48957 spin_unlock_irqrestore(&info->netlock, flags);
48958
48959 - if (info->port.count == 1) {
48960 + if (atomic_read(&info->port.count) == 1) {
48961 /* 1st open on this device, init hardware */
48962 retval = startup(info);
48963 if (retval < 0)
48964 @@ -796,8 +796,8 @@ cleanup:
48965 if (retval) {
48966 if (tty->count == 1)
48967 info->port.tty = NULL; /* tty layer will release tty struct */
48968 - if(info->port.count)
48969 - info->port.count--;
48970 + if(atomic_read(&info->port.count))
48971 + atomic_dec(&info->port.count);
48972 }
48973
48974 return retval;
48975 @@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
48976
48977 if (debug_level >= DEBUG_LEVEL_INFO)
48978 printk("%s(%d):%s close() entry, count=%d\n",
48979 - __FILE__,__LINE__, info->device_name, info->port.count);
48980 + __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
48981
48982 if (tty_port_close_start(&info->port, tty, filp) == 0)
48983 goto cleanup;
48984 @@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
48985 cleanup:
48986 if (debug_level >= DEBUG_LEVEL_INFO)
48987 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
48988 - tty->driver->name, info->port.count);
48989 + tty->driver->name, atomic_read(&info->port.count));
48990 }
48991
48992 /* Called by tty_hangup() when a hangup is signaled.
48993 @@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
48994 shutdown(info);
48995
48996 spin_lock_irqsave(&info->port.lock, flags);
48997 - info->port.count = 0;
48998 + atomic_set(&info->port.count, 0);
48999 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
49000 info->port.tty = NULL;
49001 spin_unlock_irqrestore(&info->port.lock, flags);
49002 @@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
49003 unsigned short new_crctype;
49004
49005 /* return error if TTY interface open */
49006 - if (info->port.count)
49007 + if (atomic_read(&info->port.count))
49008 return -EBUSY;
49009
49010 switch (encoding)
49011 @@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
49012
49013 /* arbitrate between network and tty opens */
49014 spin_lock_irqsave(&info->netlock, flags);
49015 - if (info->port.count != 0 || info->netcount != 0) {
49016 + if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
49017 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
49018 spin_unlock_irqrestore(&info->netlock, flags);
49019 return -EBUSY;
49020 @@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
49021 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
49022
49023 /* return error if TTY interface open */
49024 - if (info->port.count)
49025 + if (atomic_read(&info->port.count))
49026 return -EBUSY;
49027
49028 if (cmd != SIOCWANDEV)
49029 @@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
49030 * do not request bottom half processing if the
49031 * device is not open in a normal mode.
49032 */
49033 - if ( port && (port->port.count || port->netcount) &&
49034 + if ( port && (atomic_read(&port->port.count) || port->netcount) &&
49035 port->pending_bh && !port->bh_running &&
49036 !port->bh_requested ) {
49037 if ( debug_level >= DEBUG_LEVEL_ISR )
49038 @@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
49039
49040 if (debug_level >= DEBUG_LEVEL_INFO)
49041 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
49042 - __FILE__,__LINE__, tty->driver->name, port->count );
49043 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
49044
49045 spin_lock_irqsave(&info->lock, flags);
49046 if (!tty_hung_up_p(filp)) {
49047 extra_count = true;
49048 - port->count--;
49049 + atomic_dec(&port->count);
49050 }
49051 spin_unlock_irqrestore(&info->lock, flags);
49052 port->blocked_open++;
49053 @@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
49054
49055 if (debug_level >= DEBUG_LEVEL_INFO)
49056 printk("%s(%d):%s block_til_ready() count=%d\n",
49057 - __FILE__,__LINE__, tty->driver->name, port->count );
49058 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
49059
49060 tty_unlock(tty);
49061 schedule();
49062 @@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
49063 remove_wait_queue(&port->open_wait, &wait);
49064
49065 if (extra_count)
49066 - port->count++;
49067 + atomic_inc(&port->count);
49068 port->blocked_open--;
49069
49070 if (debug_level >= DEBUG_LEVEL_INFO)
49071 printk("%s(%d):%s block_til_ready() after, count=%d\n",
49072 - __FILE__,__LINE__, tty->driver->name, port->count );
49073 + __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
49074
49075 if (!retval)
49076 port->flags |= ASYNC_NORMAL_ACTIVE;
49077 diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
49078 index 40a9fe9..a3f10cc 100644
49079 --- a/drivers/tty/sysrq.c
49080 +++ b/drivers/tty/sysrq.c
49081 @@ -1075,7 +1075,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
49082 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
49083 size_t count, loff_t *ppos)
49084 {
49085 - if (count) {
49086 + if (count && capable(CAP_SYS_ADMIN)) {
49087 char c;
49088
49089 if (get_user(c, buf))
49090 diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
49091 index c74a00a..02cf211a 100644
49092 --- a/drivers/tty/tty_io.c
49093 +++ b/drivers/tty/tty_io.c
49094 @@ -3474,7 +3474,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
49095
49096 void tty_default_fops(struct file_operations *fops)
49097 {
49098 - *fops = tty_fops;
49099 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
49100 }
49101
49102 /*
49103 diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
49104 index 6458e11..6cfc218 100644
49105 --- a/drivers/tty/tty_ldisc.c
49106 +++ b/drivers/tty/tty_ldisc.c
49107 @@ -72,7 +72,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
49108 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
49109 tty_ldiscs[disc] = new_ldisc;
49110 new_ldisc->num = disc;
49111 - new_ldisc->refcount = 0;
49112 + atomic_set(&new_ldisc->refcount, 0);
49113 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
49114
49115 return ret;
49116 @@ -100,7 +100,7 @@ int tty_unregister_ldisc(int disc)
49117 return -EINVAL;
49118
49119 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
49120 - if (tty_ldiscs[disc]->refcount)
49121 + if (atomic_read(&tty_ldiscs[disc]->refcount))
49122 ret = -EBUSY;
49123 else
49124 tty_ldiscs[disc] = NULL;
49125 @@ -121,7 +121,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
49126 if (ldops) {
49127 ret = ERR_PTR(-EAGAIN);
49128 if (try_module_get(ldops->owner)) {
49129 - ldops->refcount++;
49130 + atomic_inc(&ldops->refcount);
49131 ret = ldops;
49132 }
49133 }
49134 @@ -134,7 +134,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
49135 unsigned long flags;
49136
49137 raw_spin_lock_irqsave(&tty_ldiscs_lock, flags);
49138 - ldops->refcount--;
49139 + atomic_dec(&ldops->refcount);
49140 module_put(ldops->owner);
49141 raw_spin_unlock_irqrestore(&tty_ldiscs_lock, flags);
49142 }
49143 diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
49144 index f597e88..b7f68ed 100644
49145 --- a/drivers/tty/tty_port.c
49146 +++ b/drivers/tty/tty_port.c
49147 @@ -232,7 +232,7 @@ void tty_port_hangup(struct tty_port *port)
49148 unsigned long flags;
49149
49150 spin_lock_irqsave(&port->lock, flags);
49151 - port->count = 0;
49152 + atomic_set(&port->count, 0);
49153 port->flags &= ~ASYNC_NORMAL_ACTIVE;
49154 tty = port->tty;
49155 if (tty)
49156 @@ -390,7 +390,7 @@ int tty_port_block_til_ready(struct tty_port *port,
49157 /* The port lock protects the port counts */
49158 spin_lock_irqsave(&port->lock, flags);
49159 if (!tty_hung_up_p(filp))
49160 - port->count--;
49161 + atomic_dec(&port->count);
49162 port->blocked_open++;
49163 spin_unlock_irqrestore(&port->lock, flags);
49164
49165 @@ -432,7 +432,7 @@ int tty_port_block_til_ready(struct tty_port *port,
49166 we must not mess that up further */
49167 spin_lock_irqsave(&port->lock, flags);
49168 if (!tty_hung_up_p(filp))
49169 - port->count++;
49170 + atomic_inc(&port->count);
49171 port->blocked_open--;
49172 if (retval == 0)
49173 port->flags |= ASYNC_NORMAL_ACTIVE;
49174 @@ -466,19 +466,19 @@ int tty_port_close_start(struct tty_port *port,
49175 return 0;
49176 }
49177
49178 - if (tty->count == 1 && port->count != 1) {
49179 + if (tty->count == 1 && atomic_read(&port->count) != 1) {
49180 printk(KERN_WARNING
49181 "tty_port_close_start: tty->count = 1 port count = %d.\n",
49182 - port->count);
49183 - port->count = 1;
49184 + atomic_read(&port->count));
49185 + atomic_set(&port->count, 1);
49186 }
49187 - if (--port->count < 0) {
49188 + if (atomic_dec_return(&port->count) < 0) {
49189 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
49190 - port->count);
49191 - port->count = 0;
49192 + atomic_read(&port->count));
49193 + atomic_set(&port->count, 0);
49194 }
49195
49196 - if (port->count) {
49197 + if (atomic_read(&port->count)) {
49198 spin_unlock_irqrestore(&port->lock, flags);
49199 if (port->ops->drop)
49200 port->ops->drop(port);
49201 @@ -564,7 +564,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
49202 {
49203 spin_lock_irq(&port->lock);
49204 if (!tty_hung_up_p(filp))
49205 - ++port->count;
49206 + atomic_inc(&port->count);
49207 spin_unlock_irq(&port->lock);
49208 tty_port_tty_set(port, tty);
49209
49210 diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
49211 index d0e3a44..5f8b754 100644
49212 --- a/drivers/tty/vt/keyboard.c
49213 +++ b/drivers/tty/vt/keyboard.c
49214 @@ -641,6 +641,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
49215 kbd->kbdmode == VC_OFF) &&
49216 value != KVAL(K_SAK))
49217 return; /* SAK is allowed even in raw mode */
49218 +
49219 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
49220 + {
49221 + void *func = fn_handler[value];
49222 + if (func == fn_show_state || func == fn_show_ptregs ||
49223 + func == fn_show_mem)
49224 + return;
49225 + }
49226 +#endif
49227 +
49228 fn_handler[value](vc);
49229 }
49230
49231 @@ -1776,9 +1786,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
49232 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
49233 return -EFAULT;
49234
49235 - if (!capable(CAP_SYS_TTY_CONFIG))
49236 - perm = 0;
49237 -
49238 switch (cmd) {
49239 case KDGKBENT:
49240 /* Ensure another thread doesn't free it under us */
49241 @@ -1793,6 +1800,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
49242 spin_unlock_irqrestore(&kbd_event_lock, flags);
49243 return put_user(val, &user_kbe->kb_value);
49244 case KDSKBENT:
49245 + if (!capable(CAP_SYS_TTY_CONFIG))
49246 + perm = 0;
49247 +
49248 if (!perm)
49249 return -EPERM;
49250 if (!i && v == K_NOSUCHMAP) {
49251 @@ -1883,9 +1893,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
49252 int i, j, k;
49253 int ret;
49254
49255 - if (!capable(CAP_SYS_TTY_CONFIG))
49256 - perm = 0;
49257 -
49258 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
49259 if (!kbs) {
49260 ret = -ENOMEM;
49261 @@ -1919,6 +1926,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
49262 kfree(kbs);
49263 return ((p && *p) ? -EOVERFLOW : 0);
49264 case KDSKBSENT:
49265 + if (!capable(CAP_SYS_TTY_CONFIG))
49266 + perm = 0;
49267 +
49268 if (!perm) {
49269 ret = -EPERM;
49270 goto reterr;
49271 diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
49272 index 0e808cf..d7d274b 100644
49273 --- a/drivers/uio/uio.c
49274 +++ b/drivers/uio/uio.c
49275 @@ -25,6 +25,7 @@
49276 #include <linux/kobject.h>
49277 #include <linux/cdev.h>
49278 #include <linux/uio_driver.h>
49279 +#include <asm/local.h>
49280
49281 #define UIO_MAX_DEVICES (1U << MINORBITS)
49282
49283 @@ -32,7 +33,7 @@ struct uio_device {
49284 struct module *owner;
49285 struct device *dev;
49286 int minor;
49287 - atomic_t event;
49288 + atomic_unchecked_t event;
49289 struct fasync_struct *async_queue;
49290 wait_queue_head_t wait;
49291 struct uio_info *info;
49292 @@ -243,7 +244,7 @@ static ssize_t event_show(struct device *dev,
49293 struct device_attribute *attr, char *buf)
49294 {
49295 struct uio_device *idev = dev_get_drvdata(dev);
49296 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
49297 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
49298 }
49299 static DEVICE_ATTR_RO(event);
49300
49301 @@ -401,7 +402,7 @@ void uio_event_notify(struct uio_info *info)
49302 {
49303 struct uio_device *idev = info->uio_dev;
49304
49305 - atomic_inc(&idev->event);
49306 + atomic_inc_unchecked(&idev->event);
49307 wake_up_interruptible(&idev->wait);
49308 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
49309 }
49310 @@ -454,7 +455,7 @@ static int uio_open(struct inode *inode, struct file *filep)
49311 }
49312
49313 listener->dev = idev;
49314 - listener->event_count = atomic_read(&idev->event);
49315 + listener->event_count = atomic_read_unchecked(&idev->event);
49316 filep->private_data = listener;
49317
49318 if (idev->info->open) {
49319 @@ -505,7 +506,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
49320 return -EIO;
49321
49322 poll_wait(filep, &idev->wait, wait);
49323 - if (listener->event_count != atomic_read(&idev->event))
49324 + if (listener->event_count != atomic_read_unchecked(&idev->event))
49325 return POLLIN | POLLRDNORM;
49326 return 0;
49327 }
49328 @@ -530,7 +531,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
49329 do {
49330 set_current_state(TASK_INTERRUPTIBLE);
49331
49332 - event_count = atomic_read(&idev->event);
49333 + event_count = atomic_read_unchecked(&idev->event);
49334 if (event_count != listener->event_count) {
49335 if (copy_to_user(buf, &event_count, count))
49336 retval = -EFAULT;
49337 @@ -587,9 +588,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
49338 static int uio_find_mem_index(struct vm_area_struct *vma)
49339 {
49340 struct uio_device *idev = vma->vm_private_data;
49341 + unsigned long size;
49342
49343 if (vma->vm_pgoff < MAX_UIO_MAPS) {
49344 - if (idev->info->mem[vma->vm_pgoff].size == 0)
49345 + size = idev->info->mem[vma->vm_pgoff].size;
49346 + if (size == 0)
49347 + return -1;
49348 + if (vma->vm_end - vma->vm_start > size)
49349 return -1;
49350 return (int)vma->vm_pgoff;
49351 }
49352 @@ -647,6 +652,8 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
49353 return -EINVAL;
49354 mem = idev->info->mem + mi;
49355
49356 + if (mem->addr & ~PAGE_MASK)
49357 + return -ENODEV;
49358 if (vma->vm_end - vma->vm_start > mem->size)
49359 return -EINVAL;
49360
49361 @@ -818,7 +825,7 @@ int __uio_register_device(struct module *owner,
49362 idev->owner = owner;
49363 idev->info = info;
49364 init_waitqueue_head(&idev->wait);
49365 - atomic_set(&idev->event, 0);
49366 + atomic_set_unchecked(&idev->event, 0);
49367
49368 ret = uio_get_minor(idev);
49369 if (ret)
49370 diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
49371 index 8a7eb77..c00402f 100644
49372 --- a/drivers/usb/atm/cxacru.c
49373 +++ b/drivers/usb/atm/cxacru.c
49374 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
49375 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
49376 if (ret < 2)
49377 return -EINVAL;
49378 - if (index < 0 || index > 0x7f)
49379 + if (index > 0x7f)
49380 return -EINVAL;
49381 pos += tmp;
49382
49383 diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
49384 index 25a7bfc..57f3cf5 100644
49385 --- a/drivers/usb/atm/usbatm.c
49386 +++ b/drivers/usb/atm/usbatm.c
49387 @@ -331,7 +331,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
49388 if (printk_ratelimit())
49389 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
49390 __func__, vpi, vci);
49391 - atomic_inc(&vcc->stats->rx_err);
49392 + atomic_inc_unchecked(&vcc->stats->rx_err);
49393 return;
49394 }
49395
49396 @@ -358,7 +358,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
49397 if (length > ATM_MAX_AAL5_PDU) {
49398 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
49399 __func__, length, vcc);
49400 - atomic_inc(&vcc->stats->rx_err);
49401 + atomic_inc_unchecked(&vcc->stats->rx_err);
49402 goto out;
49403 }
49404
49405 @@ -367,14 +367,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
49406 if (sarb->len < pdu_length) {
49407 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
49408 __func__, pdu_length, sarb->len, vcc);
49409 - atomic_inc(&vcc->stats->rx_err);
49410 + atomic_inc_unchecked(&vcc->stats->rx_err);
49411 goto out;
49412 }
49413
49414 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
49415 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
49416 __func__, vcc);
49417 - atomic_inc(&vcc->stats->rx_err);
49418 + atomic_inc_unchecked(&vcc->stats->rx_err);
49419 goto out;
49420 }
49421
49422 @@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
49423 if (printk_ratelimit())
49424 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
49425 __func__, length);
49426 - atomic_inc(&vcc->stats->rx_drop);
49427 + atomic_inc_unchecked(&vcc->stats->rx_drop);
49428 goto out;
49429 }
49430
49431 @@ -414,7 +414,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
49432
49433 vcc->push(vcc, skb);
49434
49435 - atomic_inc(&vcc->stats->rx);
49436 + atomic_inc_unchecked(&vcc->stats->rx);
49437 out:
49438 skb_trim(sarb, 0);
49439 }
49440 @@ -612,7 +612,7 @@ static void usbatm_tx_process(unsigned long data)
49441 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
49442
49443 usbatm_pop(vcc, skb);
49444 - atomic_inc(&vcc->stats->tx);
49445 + atomic_inc_unchecked(&vcc->stats->tx);
49446
49447 skb = skb_dequeue(&instance->sndqueue);
49448 }
49449 @@ -756,11 +756,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
49450 if (!left--)
49451 return sprintf(page,
49452 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
49453 - atomic_read(&atm_dev->stats.aal5.tx),
49454 - atomic_read(&atm_dev->stats.aal5.tx_err),
49455 - atomic_read(&atm_dev->stats.aal5.rx),
49456 - atomic_read(&atm_dev->stats.aal5.rx_err),
49457 - atomic_read(&atm_dev->stats.aal5.rx_drop));
49458 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
49459 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
49460 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
49461 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
49462 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
49463
49464 if (!left--) {
49465 if (instance->disconnected)
49466 diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
49467 index 2a3bbdf..91d72cf 100644
49468 --- a/drivers/usb/core/devices.c
49469 +++ b/drivers/usb/core/devices.c
49470 @@ -126,7 +126,7 @@ static const char format_endpt[] =
49471 * time it gets called.
49472 */
49473 static struct device_connect_event {
49474 - atomic_t count;
49475 + atomic_unchecked_t count;
49476 wait_queue_head_t wait;
49477 } device_event = {
49478 .count = ATOMIC_INIT(1),
49479 @@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
49480
49481 void usbfs_conn_disc_event(void)
49482 {
49483 - atomic_add(2, &device_event.count);
49484 + atomic_add_unchecked(2, &device_event.count);
49485 wake_up(&device_event.wait);
49486 }
49487
49488 @@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
49489
49490 poll_wait(file, &device_event.wait, wait);
49491
49492 - event_count = atomic_read(&device_event.count);
49493 + event_count = atomic_read_unchecked(&device_event.count);
49494 if (file->f_version != event_count) {
49495 file->f_version = event_count;
49496 return POLLIN | POLLRDNORM;
49497 diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
49498 index 71dc5d7..300db0e 100644
49499 --- a/drivers/usb/core/devio.c
49500 +++ b/drivers/usb/core/devio.c
49501 @@ -187,7 +187,7 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
49502 struct dev_state *ps = file->private_data;
49503 struct usb_device *dev = ps->dev;
49504 ssize_t ret = 0;
49505 - unsigned len;
49506 + size_t len;
49507 loff_t pos;
49508 int i;
49509
49510 @@ -229,16 +229,16 @@ static ssize_t usbdev_read(struct file *file, char __user *buf, size_t nbytes,
49511 for (i = 0; nbytes && i < dev->descriptor.bNumConfigurations; i++) {
49512 struct usb_config_descriptor *config =
49513 (struct usb_config_descriptor *)dev->rawdescriptors[i];
49514 - unsigned int length = le16_to_cpu(config->wTotalLength);
49515 + size_t length = le16_to_cpu(config->wTotalLength);
49516
49517 if (*ppos < pos + length) {
49518
49519 /* The descriptor may claim to be longer than it
49520 * really is. Here is the actual allocated length. */
49521 - unsigned alloclen =
49522 + size_t alloclen =
49523 le16_to_cpu(dev->config[i].desc.wTotalLength);
49524
49525 - len = length - (*ppos - pos);
49526 + len = length + pos - *ppos;
49527 if (len > nbytes)
49528 len = nbytes;
49529
49530 diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
49531 index f20a044..d1059aa 100644
49532 --- a/drivers/usb/core/hcd.c
49533 +++ b/drivers/usb/core/hcd.c
49534 @@ -1552,7 +1552,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
49535 */
49536 usb_get_urb(urb);
49537 atomic_inc(&urb->use_count);
49538 - atomic_inc(&urb->dev->urbnum);
49539 + atomic_inc_unchecked(&urb->dev->urbnum);
49540 usbmon_urb_submit(&hcd->self, urb);
49541
49542 /* NOTE requirements on root-hub callers (usbfs and the hub
49543 @@ -1579,7 +1579,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
49544 urb->hcpriv = NULL;
49545 INIT_LIST_HEAD(&urb->urb_list);
49546 atomic_dec(&urb->use_count);
49547 - atomic_dec(&urb->dev->urbnum);
49548 + atomic_dec_unchecked(&urb->dev->urbnum);
49549 if (atomic_read(&urb->reject))
49550 wake_up(&usb_kill_urb_queue);
49551 usb_put_urb(urb);
49552 diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
49553 index c5c3667..e54e5cd 100644
49554 --- a/drivers/usb/core/hub.c
49555 +++ b/drivers/usb/core/hub.c
49556 @@ -27,6 +27,7 @@
49557 #include <linux/freezer.h>
49558 #include <linux/random.h>
49559 #include <linux/pm_qos.h>
49560 +#include <linux/grsecurity.h>
49561
49562 #include <asm/uaccess.h>
49563 #include <asm/byteorder.h>
49564 @@ -4467,6 +4468,10 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
49565 goto done;
49566 return;
49567 }
49568 +
49569 + if (gr_handle_new_usb())
49570 + goto done;
49571 +
49572 if (hub_is_superspeed(hub->hdev))
49573 unit_load = 150;
49574 else
49575 diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
49576 index 82927e1..4993dbf 100644
49577 --- a/drivers/usb/core/message.c
49578 +++ b/drivers/usb/core/message.c
49579 @@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
49580 * Return: If successful, the number of bytes transferred. Otherwise, a negative
49581 * error number.
49582 */
49583 -int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
49584 +int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
49585 __u8 requesttype, __u16 value, __u16 index, void *data,
49586 __u16 size, int timeout)
49587 {
49588 @@ -181,7 +181,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
49589 * If successful, 0. Otherwise a negative error number. The number of actual
49590 * bytes transferred will be stored in the @actual_length paramater.
49591 */
49592 -int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
49593 +int __intentional_overflow(-1) usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
49594 void *data, int len, int *actual_length, int timeout)
49595 {
49596 return usb_bulk_msg(usb_dev, pipe, data, len, actual_length, timeout);
49597 @@ -221,7 +221,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
49598 * bytes transferred will be stored in the @actual_length paramater.
49599 *
49600 */
49601 -int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
49602 +int __intentional_overflow(-1) usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
49603 void *data, int len, int *actual_length, int timeout)
49604 {
49605 struct urb *urb;
49606 diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
49607 index ca516ac..6c36ee4 100644
49608 --- a/drivers/usb/core/sysfs.c
49609 +++ b/drivers/usb/core/sysfs.c
49610 @@ -236,7 +236,7 @@ static ssize_t urbnum_show(struct device *dev, struct device_attribute *attr,
49611 struct usb_device *udev;
49612
49613 udev = to_usb_device(dev);
49614 - return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
49615 + return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
49616 }
49617 static DEVICE_ATTR_RO(urbnum);
49618
49619 diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
49620 index 0a6ee2e..6f8d7e8 100644
49621 --- a/drivers/usb/core/usb.c
49622 +++ b/drivers/usb/core/usb.c
49623 @@ -433,7 +433,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
49624 set_dev_node(&dev->dev, dev_to_node(bus->controller));
49625 dev->state = USB_STATE_ATTACHED;
49626 dev->lpm_disable_count = 1;
49627 - atomic_set(&dev->urbnum, 0);
49628 + atomic_set_unchecked(&dev->urbnum, 0);
49629
49630 INIT_LIST_HEAD(&dev->ep0.urb_list);
49631 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
49632 diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
49633 index 02e44fc..3c4fe64 100644
49634 --- a/drivers/usb/dwc3/gadget.c
49635 +++ b/drivers/usb/dwc3/gadget.c
49636 @@ -532,8 +532,6 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
49637 if (!usb_endpoint_xfer_isoc(desc))
49638 return 0;
49639
49640 - memset(&trb_link, 0, sizeof(trb_link));
49641 -
49642 /* Link TRB for ISOC. The HWO bit is never reset */
49643 trb_st_hw = &dep->trb_pool[0];
49644
49645 diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
49646 index 5e29dde..eca992f 100644
49647 --- a/drivers/usb/early/ehci-dbgp.c
49648 +++ b/drivers/usb/early/ehci-dbgp.c
49649 @@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
49650
49651 #ifdef CONFIG_KGDB
49652 static struct kgdb_io kgdbdbgp_io_ops;
49653 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
49654 +static struct kgdb_io kgdbdbgp_io_ops_console;
49655 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
49656 #else
49657 #define dbgp_kgdb_mode (0)
49658 #endif
49659 @@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
49660 .write_char = kgdbdbgp_write_char,
49661 };
49662
49663 +static struct kgdb_io kgdbdbgp_io_ops_console = {
49664 + .name = "kgdbdbgp",
49665 + .read_char = kgdbdbgp_read_char,
49666 + .write_char = kgdbdbgp_write_char,
49667 + .is_console = 1
49668 +};
49669 +
49670 static int kgdbdbgp_wait_time;
49671
49672 static int __init kgdbdbgp_parse_config(char *str)
49673 @@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
49674 ptr++;
49675 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
49676 }
49677 - kgdb_register_io_module(&kgdbdbgp_io_ops);
49678 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
49679 + if (early_dbgp_console.index != -1)
49680 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
49681 + else
49682 + kgdb_register_io_module(&kgdbdbgp_io_ops);
49683
49684 return 0;
49685 }
49686 diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
49687 index b369292..9f3ba40 100644
49688 --- a/drivers/usb/gadget/u_serial.c
49689 +++ b/drivers/usb/gadget/u_serial.c
49690 @@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
49691 spin_lock_irq(&port->port_lock);
49692
49693 /* already open? Great. */
49694 - if (port->port.count) {
49695 + if (atomic_read(&port->port.count)) {
49696 status = 0;
49697 - port->port.count++;
49698 + atomic_inc(&port->port.count);
49699
49700 /* currently opening/closing? wait ... */
49701 } else if (port->openclose) {
49702 @@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
49703 tty->driver_data = port;
49704 port->port.tty = tty;
49705
49706 - port->port.count = 1;
49707 + atomic_set(&port->port.count, 1);
49708 port->openclose = false;
49709
49710 /* if connected, start the I/O stream */
49711 @@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
49712
49713 spin_lock_irq(&port->port_lock);
49714
49715 - if (port->port.count != 1) {
49716 - if (port->port.count == 0)
49717 + if (atomic_read(&port->port.count) != 1) {
49718 + if (atomic_read(&port->port.count) == 0)
49719 WARN_ON(1);
49720 else
49721 - --port->port.count;
49722 + atomic_dec(&port->port.count);
49723 goto exit;
49724 }
49725
49726 @@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
49727 * and sleep if necessary
49728 */
49729 port->openclose = true;
49730 - port->port.count = 0;
49731 + atomic_set(&port->port.count, 0);
49732
49733 gser = port->port_usb;
49734 if (gser && gser->disconnect)
49735 @@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
49736 int cond;
49737
49738 spin_lock_irq(&port->port_lock);
49739 - cond = (port->port.count == 0) && !port->openclose;
49740 + cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
49741 spin_unlock_irq(&port->port_lock);
49742 return cond;
49743 }
49744 @@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
49745 /* if it's already open, start I/O ... and notify the serial
49746 * protocol about open/close status (connect/disconnect).
49747 */
49748 - if (port->port.count) {
49749 + if (atomic_read(&port->port.count)) {
49750 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
49751 gs_start_io(port);
49752 if (gser->connect)
49753 @@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
49754
49755 port->port_usb = NULL;
49756 gser->ioport = NULL;
49757 - if (port->port.count > 0 || port->openclose) {
49758 + if (atomic_read(&port->port.count) > 0 || port->openclose) {
49759 wake_up_interruptible(&port->drain_wait);
49760 if (port->port.tty)
49761 tty_hangup(port->port.tty);
49762 @@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
49763
49764 /* finally, free any unused/unusable I/O buffers */
49765 spin_lock_irqsave(&port->port_lock, flags);
49766 - if (port->port.count == 0 && !port->openclose)
49767 + if (atomic_read(&port->port.count) == 0 && !port->openclose)
49768 gs_buf_free(&port->port_write_buf);
49769 gs_free_requests(gser->out, &port->read_pool, NULL);
49770 gs_free_requests(gser->out, &port->read_queue, NULL);
49771 diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
49772 index 835fc08..f8b22bf 100644
49773 --- a/drivers/usb/host/ehci-hub.c
49774 +++ b/drivers/usb/host/ehci-hub.c
49775 @@ -762,7 +762,7 @@ static struct urb *request_single_step_set_feature_urb(
49776 urb->transfer_flags = URB_DIR_IN;
49777 usb_get_urb(urb);
49778 atomic_inc(&urb->use_count);
49779 - atomic_inc(&urb->dev->urbnum);
49780 + atomic_inc_unchecked(&urb->dev->urbnum);
49781 urb->setup_dma = dma_map_single(
49782 hcd->self.controller,
49783 urb->setup_packet,
49784 @@ -829,7 +829,7 @@ static int ehset_single_step_set_feature(struct usb_hcd *hcd, int port)
49785 urb->status = -EINPROGRESS;
49786 usb_get_urb(urb);
49787 atomic_inc(&urb->use_count);
49788 - atomic_inc(&urb->dev->urbnum);
49789 + atomic_inc_unchecked(&urb->dev->urbnum);
49790 retval = submit_single_step_set_feature(hcd, urb, 0);
49791 if (!retval && !wait_for_completion_timeout(&done,
49792 msecs_to_jiffies(2000))) {
49793 diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
49794 index ba6a5d6..f88f7f3 100644
49795 --- a/drivers/usb/misc/appledisplay.c
49796 +++ b/drivers/usb/misc/appledisplay.c
49797 @@ -83,7 +83,7 @@ struct appledisplay {
49798 spinlock_t lock;
49799 };
49800
49801 -static atomic_t count_displays = ATOMIC_INIT(0);
49802 +static atomic_unchecked_t count_displays = ATOMIC_INIT(0);
49803 static struct workqueue_struct *wq;
49804
49805 static void appledisplay_complete(struct urb *urb)
49806 @@ -281,7 +281,7 @@ static int appledisplay_probe(struct usb_interface *iface,
49807
49808 /* Register backlight device */
49809 snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
49810 - atomic_inc_return(&count_displays) - 1);
49811 + atomic_inc_return_unchecked(&count_displays) - 1);
49812 memset(&props, 0, sizeof(struct backlight_properties));
49813 props.type = BACKLIGHT_RAW;
49814 props.max_brightness = 0xff;
49815 diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
49816 index c69bb50..215ef37 100644
49817 --- a/drivers/usb/serial/console.c
49818 +++ b/drivers/usb/serial/console.c
49819 @@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
49820
49821 info->port = port;
49822
49823 - ++port->port.count;
49824 + atomic_inc(&port->port.count);
49825 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
49826 if (serial->type->set_termios) {
49827 /*
49828 @@ -170,7 +170,7 @@ static int usb_console_setup(struct console *co, char *options)
49829 }
49830 /* Now that any required fake tty operations are completed restore
49831 * the tty port count */
49832 - --port->port.count;
49833 + atomic_dec(&port->port.count);
49834 /* The console is special in terms of closing the device so
49835 * indicate this port is now acting as a system console. */
49836 port->port.console = 1;
49837 @@ -183,7 +183,7 @@ static int usb_console_setup(struct console *co, char *options)
49838 free_tty:
49839 kfree(tty);
49840 reset_open_count:
49841 - port->port.count = 0;
49842 + atomic_set(&port->port.count, 0);
49843 usb_autopm_put_interface(serial->interface);
49844 error_get_interface:
49845 usb_serial_put(serial);
49846 @@ -194,7 +194,7 @@ static int usb_console_setup(struct console *co, char *options)
49847 static void usb_console_write(struct console *co,
49848 const char *buf, unsigned count)
49849 {
49850 - static struct usbcons_info *info = &usbcons_info;
49851 + struct usbcons_info *info = &usbcons_info;
49852 struct usb_serial_port *port = info->port;
49853 struct usb_serial *serial;
49854 int retval = -ENODEV;
49855 diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
49856 index 75f70f0..d467e1a 100644
49857 --- a/drivers/usb/storage/usb.h
49858 +++ b/drivers/usb/storage/usb.h
49859 @@ -63,7 +63,7 @@ struct us_unusual_dev {
49860 __u8 useProtocol;
49861 __u8 useTransport;
49862 int (*initFunction)(struct us_data *);
49863 -};
49864 +} __do_const;
49865
49866
49867 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
49868 diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
49869 index cf250c2..ad9d904 100644
49870 --- a/drivers/usb/wusbcore/wa-hc.h
49871 +++ b/drivers/usb/wusbcore/wa-hc.h
49872 @@ -199,7 +199,7 @@ struct wahc {
49873 spinlock_t xfer_list_lock;
49874 struct work_struct xfer_enqueue_work;
49875 struct work_struct xfer_error_work;
49876 - atomic_t xfer_id_count;
49877 + atomic_unchecked_t xfer_id_count;
49878 };
49879
49880
49881 @@ -255,7 +255,7 @@ static inline void wa_init(struct wahc *wa)
49882 spin_lock_init(&wa->xfer_list_lock);
49883 INIT_WORK(&wa->xfer_enqueue_work, wa_urb_enqueue_run);
49884 INIT_WORK(&wa->xfer_error_work, wa_process_errored_transfers_run);
49885 - atomic_set(&wa->xfer_id_count, 1);
49886 + atomic_set_unchecked(&wa->xfer_id_count, 1);
49887 }
49888
49889 /**
49890 diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
49891 index 3dcf66f..8faaf6e 100644
49892 --- a/drivers/usb/wusbcore/wa-xfer.c
49893 +++ b/drivers/usb/wusbcore/wa-xfer.c
49894 @@ -300,7 +300,7 @@ out:
49895 */
49896 static void wa_xfer_id_init(struct wa_xfer *xfer)
49897 {
49898 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
49899 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
49900 }
49901
49902 /*
49903 diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
49904 index 1eab4ac..e21efc9 100644
49905 --- a/drivers/vfio/vfio.c
49906 +++ b/drivers/vfio/vfio.c
49907 @@ -488,7 +488,7 @@ static int vfio_group_nb_add_dev(struct vfio_group *group, struct device *dev)
49908 return 0;
49909
49910 /* TODO Prevent device auto probing */
49911 - WARN("Device %s added to live group %d!\n", dev_name(dev),
49912 + WARN(1, "Device %s added to live group %d!\n", dev_name(dev),
49913 iommu_group_id(group->iommu_group));
49914
49915 return 0;
49916 diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
49917 index 5174eba..86e764a 100644
49918 --- a/drivers/vhost/vringh.c
49919 +++ b/drivers/vhost/vringh.c
49920 @@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
49921
49922 static inline int putu16_kern(u16 *p, u16 val)
49923 {
49924 - ACCESS_ONCE(*p) = val;
49925 + ACCESS_ONCE_RW(*p) = val;
49926 return 0;
49927 }
49928
49929 diff --git a/drivers/video/arcfb.c b/drivers/video/arcfb.c
49930 index e43401a..dd49b3f 100644
49931 --- a/drivers/video/arcfb.c
49932 +++ b/drivers/video/arcfb.c
49933 @@ -458,7 +458,7 @@ static ssize_t arcfb_write(struct fb_info *info, const char __user *buf,
49934 return -ENOSPC;
49935
49936 err = 0;
49937 - if ((count + p) > fbmemlength) {
49938 + if (count > (fbmemlength - p)) {
49939 count = fbmemlength - p;
49940 err = -ENOSPC;
49941 }
49942 diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
49943 index a4dfe8c..297ddd9 100644
49944 --- a/drivers/video/aty/aty128fb.c
49945 +++ b/drivers/video/aty/aty128fb.c
49946 @@ -149,7 +149,7 @@ enum {
49947 };
49948
49949 /* Must match above enum */
49950 -static char * const r128_family[] = {
49951 +static const char * const r128_family[] = {
49952 "AGP",
49953 "PCI",
49954 "PRO AGP",
49955 diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
49956 index 9b0f12c..024673d 100644
49957 --- a/drivers/video/aty/atyfb_base.c
49958 +++ b/drivers/video/aty/atyfb_base.c
49959 @@ -1326,10 +1326,14 @@ static int atyfb_set_par(struct fb_info *info)
49960 par->accel_flags = var->accel_flags; /* hack */
49961
49962 if (var->accel_flags) {
49963 - info->fbops->fb_sync = atyfb_sync;
49964 + pax_open_kernel();
49965 + *(void **)&info->fbops->fb_sync = atyfb_sync;
49966 + pax_close_kernel();
49967 info->flags &= ~FBINFO_HWACCEL_DISABLED;
49968 } else {
49969 - info->fbops->fb_sync = NULL;
49970 + pax_open_kernel();
49971 + *(void **)&info->fbops->fb_sync = NULL;
49972 + pax_close_kernel();
49973 info->flags |= FBINFO_HWACCEL_DISABLED;
49974 }
49975
49976 diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
49977 index 95ec042..e6affdd 100644
49978 --- a/drivers/video/aty/mach64_cursor.c
49979 +++ b/drivers/video/aty/mach64_cursor.c
49980 @@ -7,6 +7,7 @@
49981 #include <linux/string.h>
49982
49983 #include <asm/io.h>
49984 +#include <asm/pgtable.h>
49985
49986 #ifdef __sparc__
49987 #include <asm/fbio.h>
49988 @@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
49989 info->sprite.buf_align = 16; /* and 64 lines tall. */
49990 info->sprite.flags = FB_PIXMAP_IO;
49991
49992 - info->fbops->fb_cursor = atyfb_cursor;
49993 + pax_open_kernel();
49994 + *(void **)&info->fbops->fb_cursor = atyfb_cursor;
49995 + pax_close_kernel();
49996
49997 return 0;
49998 }
49999 diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
50000 index bca6ccc..252107e 100644
50001 --- a/drivers/video/backlight/kb3886_bl.c
50002 +++ b/drivers/video/backlight/kb3886_bl.c
50003 @@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
50004 static unsigned long kb3886bl_flags;
50005 #define KB3886BL_SUSPENDED 0x01
50006
50007 -static struct dmi_system_id __initdata kb3886bl_device_table[] = {
50008 +static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
50009 {
50010 .ident = "Sahara Touch-iT",
50011 .matches = {
50012 diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
50013 index 900aa4e..6d49418 100644
50014 --- a/drivers/video/fb_defio.c
50015 +++ b/drivers/video/fb_defio.c
50016 @@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
50017
50018 BUG_ON(!fbdefio);
50019 mutex_init(&fbdefio->lock);
50020 - info->fbops->fb_mmap = fb_deferred_io_mmap;
50021 + pax_open_kernel();
50022 + *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
50023 + pax_close_kernel();
50024 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
50025 INIT_LIST_HEAD(&fbdefio->pagelist);
50026 if (fbdefio->delay == 0) /* set a default of 1 s */
50027 @@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
50028 page->mapping = NULL;
50029 }
50030
50031 - info->fbops->fb_mmap = NULL;
50032 + *(void **)&info->fbops->fb_mmap = NULL;
50033 mutex_destroy(&fbdefio->lock);
50034 }
50035 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
50036 diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
50037 index dacaf74..8478a46 100644
50038 --- a/drivers/video/fbmem.c
50039 +++ b/drivers/video/fbmem.c
50040 @@ -433,7 +433,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
50041 image->dx += image->width + 8;
50042 }
50043 } else if (rotate == FB_ROTATE_UD) {
50044 - for (x = 0; x < num && image->dx >= 0; x++) {
50045 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
50046 info->fbops->fb_imageblit(info, image);
50047 image->dx -= image->width + 8;
50048 }
50049 @@ -445,7 +445,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
50050 image->dy += image->height + 8;
50051 }
50052 } else if (rotate == FB_ROTATE_CCW) {
50053 - for (x = 0; x < num && image->dy >= 0; x++) {
50054 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
50055 info->fbops->fb_imageblit(info, image);
50056 image->dy -= image->height + 8;
50057 }
50058 @@ -1175,7 +1175,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
50059 return -EFAULT;
50060 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
50061 return -EINVAL;
50062 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
50063 + if (con2fb.framebuffer >= FB_MAX)
50064 return -EINVAL;
50065 if (!registered_fb[con2fb.framebuffer])
50066 request_module("fb%d", con2fb.framebuffer);
50067 diff --git a/drivers/video/hyperv_fb.c b/drivers/video/hyperv_fb.c
50068 index 8d456dc..b4fa44b 100644
50069 --- a/drivers/video/hyperv_fb.c
50070 +++ b/drivers/video/hyperv_fb.c
50071 @@ -233,7 +233,7 @@ static uint screen_fb_size;
50072 static inline int synthvid_send(struct hv_device *hdev,
50073 struct synthvid_msg *msg)
50074 {
50075 - static atomic64_t request_id = ATOMIC64_INIT(0);
50076 + static atomic64_unchecked_t request_id = ATOMIC64_INIT(0);
50077 int ret;
50078
50079 msg->pipe_hdr.type = PIPE_MSG_DATA;
50080 @@ -241,7 +241,7 @@ static inline int synthvid_send(struct hv_device *hdev,
50081
50082 ret = vmbus_sendpacket(hdev->channel, msg,
50083 msg->vid_hdr.size + sizeof(struct pipe_msg_hdr),
50084 - atomic64_inc_return(&request_id),
50085 + atomic64_inc_return_unchecked(&request_id),
50086 VM_PKT_DATA_INBAND, 0);
50087
50088 if (ret)
50089 diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
50090 index 7672d2e..b56437f 100644
50091 --- a/drivers/video/i810/i810_accel.c
50092 +++ b/drivers/video/i810/i810_accel.c
50093 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
50094 }
50095 }
50096 printk("ringbuffer lockup!!!\n");
50097 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
50098 i810_report_error(mmio);
50099 par->dev_flags |= LOCKUP;
50100 info->pixmap.scan_align = 1;
50101 diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
50102 index 3c14e43..2630570 100644
50103 --- a/drivers/video/logo/logo_linux_clut224.ppm
50104 +++ b/drivers/video/logo/logo_linux_clut224.ppm
50105 @@ -2,1603 +2,1123 @@ P3
50106 # Standard 224-color Linux logo
50107 80 80
50108 255
50109 - 0 0 0 0 0 0 0 0 0 0 0 0
50110 - 0 0 0 0 0 0 0 0 0 0 0 0
50111 - 0 0 0 0 0 0 0 0 0 0 0 0
50112 - 0 0 0 0 0 0 0 0 0 0 0 0
50113 - 0 0 0 0 0 0 0 0 0 0 0 0
50114 - 0 0 0 0 0 0 0 0 0 0 0 0
50115 - 0 0 0 0 0 0 0 0 0 0 0 0
50116 - 0 0 0 0 0 0 0 0 0 0 0 0
50117 - 0 0 0 0 0 0 0 0 0 0 0 0
50118 - 6 6 6 6 6 6 10 10 10 10 10 10
50119 - 10 10 10 6 6 6 6 6 6 6 6 6
50120 - 0 0 0 0 0 0 0 0 0 0 0 0
50121 - 0 0 0 0 0 0 0 0 0 0 0 0
50122 - 0 0 0 0 0 0 0 0 0 0 0 0
50123 - 0 0 0 0 0 0 0 0 0 0 0 0
50124 - 0 0 0 0 0 0 0 0 0 0 0 0
50125 - 0 0 0 0 0 0 0 0 0 0 0 0
50126 - 0 0 0 0 0 0 0 0 0 0 0 0
50127 - 0 0 0 0 0 0 0 0 0 0 0 0
50128 - 0 0 0 0 0 0 0 0 0 0 0 0
50129 - 0 0 0 0 0 0 0 0 0 0 0 0
50130 - 0 0 0 0 0 0 0 0 0 0 0 0
50131 - 0 0 0 0 0 0 0 0 0 0 0 0
50132 - 0 0 0 0 0 0 0 0 0 0 0 0
50133 - 0 0 0 0 0 0 0 0 0 0 0 0
50134 - 0 0 0 0 0 0 0 0 0 0 0 0
50135 - 0 0 0 0 0 0 0 0 0 0 0 0
50136 - 0 0 0 0 0 0 0 0 0 0 0 0
50137 - 0 0 0 6 6 6 10 10 10 14 14 14
50138 - 22 22 22 26 26 26 30 30 30 34 34 34
50139 - 30 30 30 30 30 30 26 26 26 18 18 18
50140 - 14 14 14 10 10 10 6 6 6 0 0 0
50141 - 0 0 0 0 0 0 0 0 0 0 0 0
50142 - 0 0 0 0 0 0 0 0 0 0 0 0
50143 - 0 0 0 0 0 0 0 0 0 0 0 0
50144 - 0 0 0 0 0 0 0 0 0 0 0 0
50145 - 0 0 0 0 0 0 0 0 0 0 0 0
50146 - 0 0 0 0 0 0 0 0 0 0 0 0
50147 - 0 0 0 0 0 0 0 0 0 0 0 0
50148 - 0 0 0 0 0 0 0 0 0 0 0 0
50149 - 0 0 0 0 0 0 0 0 0 0 0 0
50150 - 0 0 0 0 0 1 0 0 1 0 0 0
50151 - 0 0 0 0 0 0 0 0 0 0 0 0
50152 - 0 0 0 0 0 0 0 0 0 0 0 0
50153 - 0 0 0 0 0 0 0 0 0 0 0 0
50154 - 0 0 0 0 0 0 0 0 0 0 0 0
50155 - 0 0 0 0 0 0 0 0 0 0 0 0
50156 - 0 0 0 0 0 0 0 0 0 0 0 0
50157 - 6 6 6 14 14 14 26 26 26 42 42 42
50158 - 54 54 54 66 66 66 78 78 78 78 78 78
50159 - 78 78 78 74 74 74 66 66 66 54 54 54
50160 - 42 42 42 26 26 26 18 18 18 10 10 10
50161 - 6 6 6 0 0 0 0 0 0 0 0 0
50162 - 0 0 0 0 0 0 0 0 0 0 0 0
50163 - 0 0 0 0 0 0 0 0 0 0 0 0
50164 - 0 0 0 0 0 0 0 0 0 0 0 0
50165 - 0 0 0 0 0 0 0 0 0 0 0 0
50166 - 0 0 0 0 0 0 0 0 0 0 0 0
50167 - 0 0 0 0 0 0 0 0 0 0 0 0
50168 - 0 0 0 0 0 0 0 0 0 0 0 0
50169 - 0 0 0 0 0 0 0 0 0 0 0 0
50170 - 0 0 1 0 0 0 0 0 0 0 0 0
50171 - 0 0 0 0 0 0 0 0 0 0 0 0
50172 - 0 0 0 0 0 0 0 0 0 0 0 0
50173 - 0 0 0 0 0 0 0 0 0 0 0 0
50174 - 0 0 0 0 0 0 0 0 0 0 0 0
50175 - 0 0 0 0 0 0 0 0 0 0 0 0
50176 - 0 0 0 0 0 0 0 0 0 10 10 10
50177 - 22 22 22 42 42 42 66 66 66 86 86 86
50178 - 66 66 66 38 38 38 38 38 38 22 22 22
50179 - 26 26 26 34 34 34 54 54 54 66 66 66
50180 - 86 86 86 70 70 70 46 46 46 26 26 26
50181 - 14 14 14 6 6 6 0 0 0 0 0 0
50182 - 0 0 0 0 0 0 0 0 0 0 0 0
50183 - 0 0 0 0 0 0 0 0 0 0 0 0
50184 - 0 0 0 0 0 0 0 0 0 0 0 0
50185 - 0 0 0 0 0 0 0 0 0 0 0 0
50186 - 0 0 0 0 0 0 0 0 0 0 0 0
50187 - 0 0 0 0 0 0 0 0 0 0 0 0
50188 - 0 0 0 0 0 0 0 0 0 0 0 0
50189 - 0 0 0 0 0 0 0 0 0 0 0 0
50190 - 0 0 1 0 0 1 0 0 1 0 0 0
50191 - 0 0 0 0 0 0 0 0 0 0 0 0
50192 - 0 0 0 0 0 0 0 0 0 0 0 0
50193 - 0 0 0 0 0 0 0 0 0 0 0 0
50194 - 0 0 0 0 0 0 0 0 0 0 0 0
50195 - 0 0 0 0 0 0 0 0 0 0 0 0
50196 - 0 0 0 0 0 0 10 10 10 26 26 26
50197 - 50 50 50 82 82 82 58 58 58 6 6 6
50198 - 2 2 6 2 2 6 2 2 6 2 2 6
50199 - 2 2 6 2 2 6 2 2 6 2 2 6
50200 - 6 6 6 54 54 54 86 86 86 66 66 66
50201 - 38 38 38 18 18 18 6 6 6 0 0 0
50202 - 0 0 0 0 0 0 0 0 0 0 0 0
50203 - 0 0 0 0 0 0 0 0 0 0 0 0
50204 - 0 0 0 0 0 0 0 0 0 0 0 0
50205 - 0 0 0 0 0 0 0 0 0 0 0 0
50206 - 0 0 0 0 0 0 0 0 0 0 0 0
50207 - 0 0 0 0 0 0 0 0 0 0 0 0
50208 - 0 0 0 0 0 0 0 0 0 0 0 0
50209 - 0 0 0 0 0 0 0 0 0 0 0 0
50210 - 0 0 0 0 0 0 0 0 0 0 0 0
50211 - 0 0 0 0 0 0 0 0 0 0 0 0
50212 - 0 0 0 0 0 0 0 0 0 0 0 0
50213 - 0 0 0 0 0 0 0 0 0 0 0 0
50214 - 0 0 0 0 0 0 0 0 0 0 0 0
50215 - 0 0 0 0 0 0 0 0 0 0 0 0
50216 - 0 0 0 6 6 6 22 22 22 50 50 50
50217 - 78 78 78 34 34 34 2 2 6 2 2 6
50218 - 2 2 6 2 2 6 2 2 6 2 2 6
50219 - 2 2 6 2 2 6 2 2 6 2 2 6
50220 - 2 2 6 2 2 6 6 6 6 70 70 70
50221 - 78 78 78 46 46 46 22 22 22 6 6 6
50222 - 0 0 0 0 0 0 0 0 0 0 0 0
50223 - 0 0 0 0 0 0 0 0 0 0 0 0
50224 - 0 0 0 0 0 0 0 0 0 0 0 0
50225 - 0 0 0 0 0 0 0 0 0 0 0 0
50226 - 0 0 0 0 0 0 0 0 0 0 0 0
50227 - 0 0 0 0 0 0 0 0 0 0 0 0
50228 - 0 0 0 0 0 0 0 0 0 0 0 0
50229 - 0 0 0 0 0 0 0 0 0 0 0 0
50230 - 0 0 1 0 0 1 0 0 1 0 0 0
50231 - 0 0 0 0 0 0 0 0 0 0 0 0
50232 - 0 0 0 0 0 0 0 0 0 0 0 0
50233 - 0 0 0 0 0 0 0 0 0 0 0 0
50234 - 0 0 0 0 0 0 0 0 0 0 0 0
50235 - 0 0 0 0 0 0 0 0 0 0 0 0
50236 - 6 6 6 18 18 18 42 42 42 82 82 82
50237 - 26 26 26 2 2 6 2 2 6 2 2 6
50238 - 2 2 6 2 2 6 2 2 6 2 2 6
50239 - 2 2 6 2 2 6 2 2 6 14 14 14
50240 - 46 46 46 34 34 34 6 6 6 2 2 6
50241 - 42 42 42 78 78 78 42 42 42 18 18 18
50242 - 6 6 6 0 0 0 0 0 0 0 0 0
50243 - 0 0 0 0 0 0 0 0 0 0 0 0
50244 - 0 0 0 0 0 0 0 0 0 0 0 0
50245 - 0 0 0 0 0 0 0 0 0 0 0 0
50246 - 0 0 0 0 0 0 0 0 0 0 0 0
50247 - 0 0 0 0 0 0 0 0 0 0 0 0
50248 - 0 0 0 0 0 0 0 0 0 0 0 0
50249 - 0 0 0 0 0 0 0 0 0 0 0 0
50250 - 0 0 1 0 0 0 0 0 1 0 0 0
50251 - 0 0 0 0 0 0 0 0 0 0 0 0
50252 - 0 0 0 0 0 0 0 0 0 0 0 0
50253 - 0 0 0 0 0 0 0 0 0 0 0 0
50254 - 0 0 0 0 0 0 0 0 0 0 0 0
50255 - 0 0 0 0 0 0 0 0 0 0 0 0
50256 - 10 10 10 30 30 30 66 66 66 58 58 58
50257 - 2 2 6 2 2 6 2 2 6 2 2 6
50258 - 2 2 6 2 2 6 2 2 6 2 2 6
50259 - 2 2 6 2 2 6 2 2 6 26 26 26
50260 - 86 86 86 101 101 101 46 46 46 10 10 10
50261 - 2 2 6 58 58 58 70 70 70 34 34 34
50262 - 10 10 10 0 0 0 0 0 0 0 0 0
50263 - 0 0 0 0 0 0 0 0 0 0 0 0
50264 - 0 0 0 0 0 0 0 0 0 0 0 0
50265 - 0 0 0 0 0 0 0 0 0 0 0 0
50266 - 0 0 0 0 0 0 0 0 0 0 0 0
50267 - 0 0 0 0 0 0 0 0 0 0 0 0
50268 - 0 0 0 0 0 0 0 0 0 0 0 0
50269 - 0 0 0 0 0 0 0 0 0 0 0 0
50270 - 0 0 1 0 0 1 0 0 1 0 0 0
50271 - 0 0 0 0 0 0 0 0 0 0 0 0
50272 - 0 0 0 0 0 0 0 0 0 0 0 0
50273 - 0 0 0 0 0 0 0 0 0 0 0 0
50274 - 0 0 0 0 0 0 0 0 0 0 0 0
50275 - 0 0 0 0 0 0 0 0 0 0 0 0
50276 - 14 14 14 42 42 42 86 86 86 10 10 10
50277 - 2 2 6 2 2 6 2 2 6 2 2 6
50278 - 2 2 6 2 2 6 2 2 6 2 2 6
50279 - 2 2 6 2 2 6 2 2 6 30 30 30
50280 - 94 94 94 94 94 94 58 58 58 26 26 26
50281 - 2 2 6 6 6 6 78 78 78 54 54 54
50282 - 22 22 22 6 6 6 0 0 0 0 0 0
50283 - 0 0 0 0 0 0 0 0 0 0 0 0
50284 - 0 0 0 0 0 0 0 0 0 0 0 0
50285 - 0 0 0 0 0 0 0 0 0 0 0 0
50286 - 0 0 0 0 0 0 0 0 0 0 0 0
50287 - 0 0 0 0 0 0 0 0 0 0 0 0
50288 - 0 0 0 0 0 0 0 0 0 0 0 0
50289 - 0 0 0 0 0 0 0 0 0 0 0 0
50290 - 0 0 0 0 0 0 0 0 0 0 0 0
50291 - 0 0 0 0 0 0 0 0 0 0 0 0
50292 - 0 0 0 0 0 0 0 0 0 0 0 0
50293 - 0 0 0 0 0 0 0 0 0 0 0 0
50294 - 0 0 0 0 0 0 0 0 0 0 0 0
50295 - 0 0 0 0 0 0 0 0 0 6 6 6
50296 - 22 22 22 62 62 62 62 62 62 2 2 6
50297 - 2 2 6 2 2 6 2 2 6 2 2 6
50298 - 2 2 6 2 2 6 2 2 6 2 2 6
50299 - 2 2 6 2 2 6 2 2 6 26 26 26
50300 - 54 54 54 38 38 38 18 18 18 10 10 10
50301 - 2 2 6 2 2 6 34 34 34 82 82 82
50302 - 38 38 38 14 14 14 0 0 0 0 0 0
50303 - 0 0 0 0 0 0 0 0 0 0 0 0
50304 - 0 0 0 0 0 0 0 0 0 0 0 0
50305 - 0 0 0 0 0 0 0 0 0 0 0 0
50306 - 0 0 0 0 0 0 0 0 0 0 0 0
50307 - 0 0 0 0 0 0 0 0 0 0 0 0
50308 - 0 0 0 0 0 0 0 0 0 0 0 0
50309 - 0 0 0 0 0 0 0 0 0 0 0 0
50310 - 0 0 0 0 0 1 0 0 1 0 0 0
50311 - 0 0 0 0 0 0 0 0 0 0 0 0
50312 - 0 0 0 0 0 0 0 0 0 0 0 0
50313 - 0 0 0 0 0 0 0 0 0 0 0 0
50314 - 0 0 0 0 0 0 0 0 0 0 0 0
50315 - 0 0 0 0 0 0 0 0 0 6 6 6
50316 - 30 30 30 78 78 78 30 30 30 2 2 6
50317 - 2 2 6 2 2 6 2 2 6 2 2 6
50318 - 2 2 6 2 2 6 2 2 6 2 2 6
50319 - 2 2 6 2 2 6 2 2 6 10 10 10
50320 - 10 10 10 2 2 6 2 2 6 2 2 6
50321 - 2 2 6 2 2 6 2 2 6 78 78 78
50322 - 50 50 50 18 18 18 6 6 6 0 0 0
50323 - 0 0 0 0 0 0 0 0 0 0 0 0
50324 - 0 0 0 0 0 0 0 0 0 0 0 0
50325 - 0 0 0 0 0 0 0 0 0 0 0 0
50326 - 0 0 0 0 0 0 0 0 0 0 0 0
50327 - 0 0 0 0 0 0 0 0 0 0 0 0
50328 - 0 0 0 0 0 0 0 0 0 0 0 0
50329 - 0 0 0 0 0 0 0 0 0 0 0 0
50330 - 0 0 1 0 0 0 0 0 0 0 0 0
50331 - 0 0 0 0 0 0 0 0 0 0 0 0
50332 - 0 0 0 0 0 0 0 0 0 0 0 0
50333 - 0 0 0 0 0 0 0 0 0 0 0 0
50334 - 0 0 0 0 0 0 0 0 0 0 0 0
50335 - 0 0 0 0 0 0 0 0 0 10 10 10
50336 - 38 38 38 86 86 86 14 14 14 2 2 6
50337 - 2 2 6 2 2 6 2 2 6 2 2 6
50338 - 2 2 6 2 2 6 2 2 6 2 2 6
50339 - 2 2 6 2 2 6 2 2 6 2 2 6
50340 - 2 2 6 2 2 6 2 2 6 2 2 6
50341 - 2 2 6 2 2 6 2 2 6 54 54 54
50342 - 66 66 66 26 26 26 6 6 6 0 0 0
50343 - 0 0 0 0 0 0 0 0 0 0 0 0
50344 - 0 0 0 0 0 0 0 0 0 0 0 0
50345 - 0 0 0 0 0 0 0 0 0 0 0 0
50346 - 0 0 0 0 0 0 0 0 0 0 0 0
50347 - 0 0 0 0 0 0 0 0 0 0 0 0
50348 - 0 0 0 0 0 0 0 0 0 0 0 0
50349 - 0 0 0 0 0 0 0 0 0 0 0 0
50350 - 0 0 0 0 0 1 0 0 1 0 0 0
50351 - 0 0 0 0 0 0 0 0 0 0 0 0
50352 - 0 0 0 0 0 0 0 0 0 0 0 0
50353 - 0 0 0 0 0 0 0 0 0 0 0 0
50354 - 0 0 0 0 0 0 0 0 0 0 0 0
50355 - 0 0 0 0 0 0 0 0 0 14 14 14
50356 - 42 42 42 82 82 82 2 2 6 2 2 6
50357 - 2 2 6 6 6 6 10 10 10 2 2 6
50358 - 2 2 6 2 2 6 2 2 6 2 2 6
50359 - 2 2 6 2 2 6 2 2 6 6 6 6
50360 - 14 14 14 10 10 10 2 2 6 2 2 6
50361 - 2 2 6 2 2 6 2 2 6 18 18 18
50362 - 82 82 82 34 34 34 10 10 10 0 0 0
50363 - 0 0 0 0 0 0 0 0 0 0 0 0
50364 - 0 0 0 0 0 0 0 0 0 0 0 0
50365 - 0 0 0 0 0 0 0 0 0 0 0 0
50366 - 0 0 0 0 0 0 0 0 0 0 0 0
50367 - 0 0 0 0 0 0 0 0 0 0 0 0
50368 - 0 0 0 0 0 0 0 0 0 0 0 0
50369 - 0 0 0 0 0 0 0 0 0 0 0 0
50370 - 0 0 1 0 0 0 0 0 0 0 0 0
50371 - 0 0 0 0 0 0 0 0 0 0 0 0
50372 - 0 0 0 0 0 0 0 0 0 0 0 0
50373 - 0 0 0 0 0 0 0 0 0 0 0 0
50374 - 0 0 0 0 0 0 0 0 0 0 0 0
50375 - 0 0 0 0 0 0 0 0 0 14 14 14
50376 - 46 46 46 86 86 86 2 2 6 2 2 6
50377 - 6 6 6 6 6 6 22 22 22 34 34 34
50378 - 6 6 6 2 2 6 2 2 6 2 2 6
50379 - 2 2 6 2 2 6 18 18 18 34 34 34
50380 - 10 10 10 50 50 50 22 22 22 2 2 6
50381 - 2 2 6 2 2 6 2 2 6 10 10 10
50382 - 86 86 86 42 42 42 14 14 14 0 0 0
50383 - 0 0 0 0 0 0 0 0 0 0 0 0
50384 - 0 0 0 0 0 0 0 0 0 0 0 0
50385 - 0 0 0 0 0 0 0 0 0 0 0 0
50386 - 0 0 0 0 0 0 0 0 0 0 0 0
50387 - 0 0 0 0 0 0 0 0 0 0 0 0
50388 - 0 0 0 0 0 0 0 0 0 0 0 0
50389 - 0 0 0 0 0 0 0 0 0 0 0 0
50390 - 0 0 1 0 0 1 0 0 1 0 0 0
50391 - 0 0 0 0 0 0 0 0 0 0 0 0
50392 - 0 0 0 0 0 0 0 0 0 0 0 0
50393 - 0 0 0 0 0 0 0 0 0 0 0 0
50394 - 0 0 0 0 0 0 0 0 0 0 0 0
50395 - 0 0 0 0 0 0 0 0 0 14 14 14
50396 - 46 46 46 86 86 86 2 2 6 2 2 6
50397 - 38 38 38 116 116 116 94 94 94 22 22 22
50398 - 22 22 22 2 2 6 2 2 6 2 2 6
50399 - 14 14 14 86 86 86 138 138 138 162 162 162
50400 -154 154 154 38 38 38 26 26 26 6 6 6
50401 - 2 2 6 2 2 6 2 2 6 2 2 6
50402 - 86 86 86 46 46 46 14 14 14 0 0 0
50403 - 0 0 0 0 0 0 0 0 0 0 0 0
50404 - 0 0 0 0 0 0 0 0 0 0 0 0
50405 - 0 0 0 0 0 0 0 0 0 0 0 0
50406 - 0 0 0 0 0 0 0 0 0 0 0 0
50407 - 0 0 0 0 0 0 0 0 0 0 0 0
50408 - 0 0 0 0 0 0 0 0 0 0 0 0
50409 - 0 0 0 0 0 0 0 0 0 0 0 0
50410 - 0 0 0 0 0 0 0 0 0 0 0 0
50411 - 0 0 0 0 0 0 0 0 0 0 0 0
50412 - 0 0 0 0 0 0 0 0 0 0 0 0
50413 - 0 0 0 0 0 0 0 0 0 0 0 0
50414 - 0 0 0 0 0 0 0 0 0 0 0 0
50415 - 0 0 0 0 0 0 0 0 0 14 14 14
50416 - 46 46 46 86 86 86 2 2 6 14 14 14
50417 -134 134 134 198 198 198 195 195 195 116 116 116
50418 - 10 10 10 2 2 6 2 2 6 6 6 6
50419 -101 98 89 187 187 187 210 210 210 218 218 218
50420 -214 214 214 134 134 134 14 14 14 6 6 6
50421 - 2 2 6 2 2 6 2 2 6 2 2 6
50422 - 86 86 86 50 50 50 18 18 18 6 6 6
50423 - 0 0 0 0 0 0 0 0 0 0 0 0
50424 - 0 0 0 0 0 0 0 0 0 0 0 0
50425 - 0 0 0 0 0 0 0 0 0 0 0 0
50426 - 0 0 0 0 0 0 0 0 0 0 0 0
50427 - 0 0 0 0 0 0 0 0 0 0 0 0
50428 - 0 0 0 0 0 0 0 0 0 0 0 0
50429 - 0 0 0 0 0 0 0 0 1 0 0 0
50430 - 0 0 1 0 0 1 0 0 1 0 0 0
50431 - 0 0 0 0 0 0 0 0 0 0 0 0
50432 - 0 0 0 0 0 0 0 0 0 0 0 0
50433 - 0 0 0 0 0 0 0 0 0 0 0 0
50434 - 0 0 0 0 0 0 0 0 0 0 0 0
50435 - 0 0 0 0 0 0 0 0 0 14 14 14
50436 - 46 46 46 86 86 86 2 2 6 54 54 54
50437 -218 218 218 195 195 195 226 226 226 246 246 246
50438 - 58 58 58 2 2 6 2 2 6 30 30 30
50439 -210 210 210 253 253 253 174 174 174 123 123 123
50440 -221 221 221 234 234 234 74 74 74 2 2 6
50441 - 2 2 6 2 2 6 2 2 6 2 2 6
50442 - 70 70 70 58 58 58 22 22 22 6 6 6
50443 - 0 0 0 0 0 0 0 0 0 0 0 0
50444 - 0 0 0 0 0 0 0 0 0 0 0 0
50445 - 0 0 0 0 0 0 0 0 0 0 0 0
50446 - 0 0 0 0 0 0 0 0 0 0 0 0
50447 - 0 0 0 0 0 0 0 0 0 0 0 0
50448 - 0 0 0 0 0 0 0 0 0 0 0 0
50449 - 0 0 0 0 0 0 0 0 0 0 0 0
50450 - 0 0 0 0 0 0 0 0 0 0 0 0
50451 - 0 0 0 0 0 0 0 0 0 0 0 0
50452 - 0 0 0 0 0 0 0 0 0 0 0 0
50453 - 0 0 0 0 0 0 0 0 0 0 0 0
50454 - 0 0 0 0 0 0 0 0 0 0 0 0
50455 - 0 0 0 0 0 0 0 0 0 14 14 14
50456 - 46 46 46 82 82 82 2 2 6 106 106 106
50457 -170 170 170 26 26 26 86 86 86 226 226 226
50458 -123 123 123 10 10 10 14 14 14 46 46 46
50459 -231 231 231 190 190 190 6 6 6 70 70 70
50460 - 90 90 90 238 238 238 158 158 158 2 2 6
50461 - 2 2 6 2 2 6 2 2 6 2 2 6
50462 - 70 70 70 58 58 58 22 22 22 6 6 6
50463 - 0 0 0 0 0 0 0 0 0 0 0 0
50464 - 0 0 0 0 0 0 0 0 0 0 0 0
50465 - 0 0 0 0 0 0 0 0 0 0 0 0
50466 - 0 0 0 0 0 0 0 0 0 0 0 0
50467 - 0 0 0 0 0 0 0 0 0 0 0 0
50468 - 0 0 0 0 0 0 0 0 0 0 0 0
50469 - 0 0 0 0 0 0 0 0 1 0 0 0
50470 - 0 0 1 0 0 1 0 0 1 0 0 0
50471 - 0 0 0 0 0 0 0 0 0 0 0 0
50472 - 0 0 0 0 0 0 0 0 0 0 0 0
50473 - 0 0 0 0 0 0 0 0 0 0 0 0
50474 - 0 0 0 0 0 0 0 0 0 0 0 0
50475 - 0 0 0 0 0 0 0 0 0 14 14 14
50476 - 42 42 42 86 86 86 6 6 6 116 116 116
50477 -106 106 106 6 6 6 70 70 70 149 149 149
50478 -128 128 128 18 18 18 38 38 38 54 54 54
50479 -221 221 221 106 106 106 2 2 6 14 14 14
50480 - 46 46 46 190 190 190 198 198 198 2 2 6
50481 - 2 2 6 2 2 6 2 2 6 2 2 6
50482 - 74 74 74 62 62 62 22 22 22 6 6 6
50483 - 0 0 0 0 0 0 0 0 0 0 0 0
50484 - 0 0 0 0 0 0 0 0 0 0 0 0
50485 - 0 0 0 0 0 0 0 0 0 0 0 0
50486 - 0 0 0 0 0 0 0 0 0 0 0 0
50487 - 0 0 0 0 0 0 0 0 0 0 0 0
50488 - 0 0 0 0 0 0 0 0 0 0 0 0
50489 - 0 0 0 0 0 0 0 0 1 0 0 0
50490 - 0 0 1 0 0 0 0 0 1 0 0 0
50491 - 0 0 0 0 0 0 0 0 0 0 0 0
50492 - 0 0 0 0 0 0 0 0 0 0 0 0
50493 - 0 0 0 0 0 0 0 0 0 0 0 0
50494 - 0 0 0 0 0 0 0 0 0 0 0 0
50495 - 0 0 0 0 0 0 0 0 0 14 14 14
50496 - 42 42 42 94 94 94 14 14 14 101 101 101
50497 -128 128 128 2 2 6 18 18 18 116 116 116
50498 -118 98 46 121 92 8 121 92 8 98 78 10
50499 -162 162 162 106 106 106 2 2 6 2 2 6
50500 - 2 2 6 195 195 195 195 195 195 6 6 6
50501 - 2 2 6 2 2 6 2 2 6 2 2 6
50502 - 74 74 74 62 62 62 22 22 22 6 6 6
50503 - 0 0 0 0 0 0 0 0 0 0 0 0
50504 - 0 0 0 0 0 0 0 0 0 0 0 0
50505 - 0 0 0 0 0 0 0 0 0 0 0 0
50506 - 0 0 0 0 0 0 0 0 0 0 0 0
50507 - 0 0 0 0 0 0 0 0 0 0 0 0
50508 - 0 0 0 0 0 0 0 0 0 0 0 0
50509 - 0 0 0 0 0 0 0 0 1 0 0 1
50510 - 0 0 1 0 0 0 0 0 1 0 0 0
50511 - 0 0 0 0 0 0 0 0 0 0 0 0
50512 - 0 0 0 0 0 0 0 0 0 0 0 0
50513 - 0 0 0 0 0 0 0 0 0 0 0 0
50514 - 0 0 0 0 0 0 0 0 0 0 0 0
50515 - 0 0 0 0 0 0 0 0 0 10 10 10
50516 - 38 38 38 90 90 90 14 14 14 58 58 58
50517 -210 210 210 26 26 26 54 38 6 154 114 10
50518 -226 170 11 236 186 11 225 175 15 184 144 12
50519 -215 174 15 175 146 61 37 26 9 2 2 6
50520 - 70 70 70 246 246 246 138 138 138 2 2 6
50521 - 2 2 6 2 2 6 2 2 6 2 2 6
50522 - 70 70 70 66 66 66 26 26 26 6 6 6
50523 - 0 0 0 0 0 0 0 0 0 0 0 0
50524 - 0 0 0 0 0 0 0 0 0 0 0 0
50525 - 0 0 0 0 0 0 0 0 0 0 0 0
50526 - 0 0 0 0 0 0 0 0 0 0 0 0
50527 - 0 0 0 0 0 0 0 0 0 0 0 0
50528 - 0 0 0 0 0 0 0 0 0 0 0 0
50529 - 0 0 0 0 0 0 0 0 0 0 0 0
50530 - 0 0 0 0 0 0 0 0 0 0 0 0
50531 - 0 0 0 0 0 0 0 0 0 0 0 0
50532 - 0 0 0 0 0 0 0 0 0 0 0 0
50533 - 0 0 0 0 0 0 0 0 0 0 0 0
50534 - 0 0 0 0 0 0 0 0 0 0 0 0
50535 - 0 0 0 0 0 0 0 0 0 10 10 10
50536 - 38 38 38 86 86 86 14 14 14 10 10 10
50537 -195 195 195 188 164 115 192 133 9 225 175 15
50538 -239 182 13 234 190 10 232 195 16 232 200 30
50539 -245 207 45 241 208 19 232 195 16 184 144 12
50540 -218 194 134 211 206 186 42 42 42 2 2 6
50541 - 2 2 6 2 2 6 2 2 6 2 2 6
50542 - 50 50 50 74 74 74 30 30 30 6 6 6
50543 - 0 0 0 0 0 0 0 0 0 0 0 0
50544 - 0 0 0 0 0 0 0 0 0 0 0 0
50545 - 0 0 0 0 0 0 0 0 0 0 0 0
50546 - 0 0 0 0 0 0 0 0 0 0 0 0
50547 - 0 0 0 0 0 0 0 0 0 0 0 0
50548 - 0 0 0 0 0 0 0 0 0 0 0 0
50549 - 0 0 0 0 0 0 0 0 0 0 0 0
50550 - 0 0 0 0 0 0 0 0 0 0 0 0
50551 - 0 0 0 0 0 0 0 0 0 0 0 0
50552 - 0 0 0 0 0 0 0 0 0 0 0 0
50553 - 0 0 0 0 0 0 0 0 0 0 0 0
50554 - 0 0 0 0 0 0 0 0 0 0 0 0
50555 - 0 0 0 0 0 0 0 0 0 10 10 10
50556 - 34 34 34 86 86 86 14 14 14 2 2 6
50557 -121 87 25 192 133 9 219 162 10 239 182 13
50558 -236 186 11 232 195 16 241 208 19 244 214 54
50559 -246 218 60 246 218 38 246 215 20 241 208 19
50560 -241 208 19 226 184 13 121 87 25 2 2 6
50561 - 2 2 6 2 2 6 2 2 6 2 2 6
50562 - 50 50 50 82 82 82 34 34 34 10 10 10
50563 - 0 0 0 0 0 0 0 0 0 0 0 0
50564 - 0 0 0 0 0 0 0 0 0 0 0 0
50565 - 0 0 0 0 0 0 0 0 0 0 0 0
50566 - 0 0 0 0 0 0 0 0 0 0 0 0
50567 - 0 0 0 0 0 0 0 0 0 0 0 0
50568 - 0 0 0 0 0 0 0 0 0 0 0 0
50569 - 0 0 0 0 0 0 0 0 0 0 0 0
50570 - 0 0 0 0 0 0 0 0 0 0 0 0
50571 - 0 0 0 0 0 0 0 0 0 0 0 0
50572 - 0 0 0 0 0 0 0 0 0 0 0 0
50573 - 0 0 0 0 0 0 0 0 0 0 0 0
50574 - 0 0 0 0 0 0 0 0 0 0 0 0
50575 - 0 0 0 0 0 0 0 0 0 10 10 10
50576 - 34 34 34 82 82 82 30 30 30 61 42 6
50577 -180 123 7 206 145 10 230 174 11 239 182 13
50578 -234 190 10 238 202 15 241 208 19 246 218 74
50579 -246 218 38 246 215 20 246 215 20 246 215 20
50580 -226 184 13 215 174 15 184 144 12 6 6 6
50581 - 2 2 6 2 2 6 2 2 6 2 2 6
50582 - 26 26 26 94 94 94 42 42 42 14 14 14
50583 - 0 0 0 0 0 0 0 0 0 0 0 0
50584 - 0 0 0 0 0 0 0 0 0 0 0 0
50585 - 0 0 0 0 0 0 0 0 0 0 0 0
50586 - 0 0 0 0 0 0 0 0 0 0 0 0
50587 - 0 0 0 0 0 0 0 0 0 0 0 0
50588 - 0 0 0 0 0 0 0 0 0 0 0 0
50589 - 0 0 0 0 0 0 0 0 0 0 0 0
50590 - 0 0 0 0 0 0 0 0 0 0 0 0
50591 - 0 0 0 0 0 0 0 0 0 0 0 0
50592 - 0 0 0 0 0 0 0 0 0 0 0 0
50593 - 0 0 0 0 0 0 0 0 0 0 0 0
50594 - 0 0 0 0 0 0 0 0 0 0 0 0
50595 - 0 0 0 0 0 0 0 0 0 10 10 10
50596 - 30 30 30 78 78 78 50 50 50 104 69 6
50597 -192 133 9 216 158 10 236 178 12 236 186 11
50598 -232 195 16 241 208 19 244 214 54 245 215 43
50599 -246 215 20 246 215 20 241 208 19 198 155 10
50600 -200 144 11 216 158 10 156 118 10 2 2 6
50601 - 2 2 6 2 2 6 2 2 6 2 2 6
50602 - 6 6 6 90 90 90 54 54 54 18 18 18
50603 - 6 6 6 0 0 0 0 0 0 0 0 0
50604 - 0 0 0 0 0 0 0 0 0 0 0 0
50605 - 0 0 0 0 0 0 0 0 0 0 0 0
50606 - 0 0 0 0 0 0 0 0 0 0 0 0
50607 - 0 0 0 0 0 0 0 0 0 0 0 0
50608 - 0 0 0 0 0 0 0 0 0 0 0 0
50609 - 0 0 0 0 0 0 0 0 0 0 0 0
50610 - 0 0 0 0 0 0 0 0 0 0 0 0
50611 - 0 0 0 0 0 0 0 0 0 0 0 0
50612 - 0 0 0 0 0 0 0 0 0 0 0 0
50613 - 0 0 0 0 0 0 0 0 0 0 0 0
50614 - 0 0 0 0 0 0 0 0 0 0 0 0
50615 - 0 0 0 0 0 0 0 0 0 10 10 10
50616 - 30 30 30 78 78 78 46 46 46 22 22 22
50617 -137 92 6 210 162 10 239 182 13 238 190 10
50618 -238 202 15 241 208 19 246 215 20 246 215 20
50619 -241 208 19 203 166 17 185 133 11 210 150 10
50620 -216 158 10 210 150 10 102 78 10 2 2 6
50621 - 6 6 6 54 54 54 14 14 14 2 2 6
50622 - 2 2 6 62 62 62 74 74 74 30 30 30
50623 - 10 10 10 0 0 0 0 0 0 0 0 0
50624 - 0 0 0 0 0 0 0 0 0 0 0 0
50625 - 0 0 0 0 0 0 0 0 0 0 0 0
50626 - 0 0 0 0 0 0 0 0 0 0 0 0
50627 - 0 0 0 0 0 0 0 0 0 0 0 0
50628 - 0 0 0 0 0 0 0 0 0 0 0 0
50629 - 0 0 0 0 0 0 0 0 0 0 0 0
50630 - 0 0 0 0 0 0 0 0 0 0 0 0
50631 - 0 0 0 0 0 0 0 0 0 0 0 0
50632 - 0 0 0 0 0 0 0 0 0 0 0 0
50633 - 0 0 0 0 0 0 0 0 0 0 0 0
50634 - 0 0 0 0 0 0 0 0 0 0 0 0
50635 - 0 0 0 0 0 0 0 0 0 10 10 10
50636 - 34 34 34 78 78 78 50 50 50 6 6 6
50637 - 94 70 30 139 102 15 190 146 13 226 184 13
50638 -232 200 30 232 195 16 215 174 15 190 146 13
50639 -168 122 10 192 133 9 210 150 10 213 154 11
50640 -202 150 34 182 157 106 101 98 89 2 2 6
50641 - 2 2 6 78 78 78 116 116 116 58 58 58
50642 - 2 2 6 22 22 22 90 90 90 46 46 46
50643 - 18 18 18 6 6 6 0 0 0 0 0 0
50644 - 0 0 0 0 0 0 0 0 0 0 0 0
50645 - 0 0 0 0 0 0 0 0 0 0 0 0
50646 - 0 0 0 0 0 0 0 0 0 0 0 0
50647 - 0 0 0 0 0 0 0 0 0 0 0 0
50648 - 0 0 0 0 0 0 0 0 0 0 0 0
50649 - 0 0 0 0 0 0 0 0 0 0 0 0
50650 - 0 0 0 0 0 0 0 0 0 0 0 0
50651 - 0 0 0 0 0 0 0 0 0 0 0 0
50652 - 0 0 0 0 0 0 0 0 0 0 0 0
50653 - 0 0 0 0 0 0 0 0 0 0 0 0
50654 - 0 0 0 0 0 0 0 0 0 0 0 0
50655 - 0 0 0 0 0 0 0 0 0 10 10 10
50656 - 38 38 38 86 86 86 50 50 50 6 6 6
50657 -128 128 128 174 154 114 156 107 11 168 122 10
50658 -198 155 10 184 144 12 197 138 11 200 144 11
50659 -206 145 10 206 145 10 197 138 11 188 164 115
50660 -195 195 195 198 198 198 174 174 174 14 14 14
50661 - 2 2 6 22 22 22 116 116 116 116 116 116
50662 - 22 22 22 2 2 6 74 74 74 70 70 70
50663 - 30 30 30 10 10 10 0 0 0 0 0 0
50664 - 0 0 0 0 0 0 0 0 0 0 0 0
50665 - 0 0 0 0 0 0 0 0 0 0 0 0
50666 - 0 0 0 0 0 0 0 0 0 0 0 0
50667 - 0 0 0 0 0 0 0 0 0 0 0 0
50668 - 0 0 0 0 0 0 0 0 0 0 0 0
50669 - 0 0 0 0 0 0 0 0 0 0 0 0
50670 - 0 0 0 0 0 0 0 0 0 0 0 0
50671 - 0 0 0 0 0 0 0 0 0 0 0 0
50672 - 0 0 0 0 0 0 0 0 0 0 0 0
50673 - 0 0 0 0 0 0 0 0 0 0 0 0
50674 - 0 0 0 0 0 0 0 0 0 0 0 0
50675 - 0 0 0 0 0 0 6 6 6 18 18 18
50676 - 50 50 50 101 101 101 26 26 26 10 10 10
50677 -138 138 138 190 190 190 174 154 114 156 107 11
50678 -197 138 11 200 144 11 197 138 11 192 133 9
50679 -180 123 7 190 142 34 190 178 144 187 187 187
50680 -202 202 202 221 221 221 214 214 214 66 66 66
50681 - 2 2 6 2 2 6 50 50 50 62 62 62
50682 - 6 6 6 2 2 6 10 10 10 90 90 90
50683 - 50 50 50 18 18 18 6 6 6 0 0 0
50684 - 0 0 0 0 0 0 0 0 0 0 0 0
50685 - 0 0 0 0 0 0 0 0 0 0 0 0
50686 - 0 0 0 0 0 0 0 0 0 0 0 0
50687 - 0 0 0 0 0 0 0 0 0 0 0 0
50688 - 0 0 0 0 0 0 0 0 0 0 0 0
50689 - 0 0 0 0 0 0 0 0 0 0 0 0
50690 - 0 0 0 0 0 0 0 0 0 0 0 0
50691 - 0 0 0 0 0 0 0 0 0 0 0 0
50692 - 0 0 0 0 0 0 0 0 0 0 0 0
50693 - 0 0 0 0 0 0 0 0 0 0 0 0
50694 - 0 0 0 0 0 0 0 0 0 0 0 0
50695 - 0 0 0 0 0 0 10 10 10 34 34 34
50696 - 74 74 74 74 74 74 2 2 6 6 6 6
50697 -144 144 144 198 198 198 190 190 190 178 166 146
50698 -154 121 60 156 107 11 156 107 11 168 124 44
50699 -174 154 114 187 187 187 190 190 190 210 210 210
50700 -246 246 246 253 253 253 253 253 253 182 182 182
50701 - 6 6 6 2 2 6 2 2 6 2 2 6
50702 - 2 2 6 2 2 6 2 2 6 62 62 62
50703 - 74 74 74 34 34 34 14 14 14 0 0 0
50704 - 0 0 0 0 0 0 0 0 0 0 0 0
50705 - 0 0 0 0 0 0 0 0 0 0 0 0
50706 - 0 0 0 0 0 0 0 0 0 0 0 0
50707 - 0 0 0 0 0 0 0 0 0 0 0 0
50708 - 0 0 0 0 0 0 0 0 0 0 0 0
50709 - 0 0 0 0 0 0 0 0 0 0 0 0
50710 - 0 0 0 0 0 0 0 0 0 0 0 0
50711 - 0 0 0 0 0 0 0 0 0 0 0 0
50712 - 0 0 0 0 0 0 0 0 0 0 0 0
50713 - 0 0 0 0 0 0 0 0 0 0 0 0
50714 - 0 0 0 0 0 0 0 0 0 0 0 0
50715 - 0 0 0 10 10 10 22 22 22 54 54 54
50716 - 94 94 94 18 18 18 2 2 6 46 46 46
50717 -234 234 234 221 221 221 190 190 190 190 190 190
50718 -190 190 190 187 187 187 187 187 187 190 190 190
50719 -190 190 190 195 195 195 214 214 214 242 242 242
50720 -253 253 253 253 253 253 253 253 253 253 253 253
50721 - 82 82 82 2 2 6 2 2 6 2 2 6
50722 - 2 2 6 2 2 6 2 2 6 14 14 14
50723 - 86 86 86 54 54 54 22 22 22 6 6 6
50724 - 0 0 0 0 0 0 0 0 0 0 0 0
50725 - 0 0 0 0 0 0 0 0 0 0 0 0
50726 - 0 0 0 0 0 0 0 0 0 0 0 0
50727 - 0 0 0 0 0 0 0 0 0 0 0 0
50728 - 0 0 0 0 0 0 0 0 0 0 0 0
50729 - 0 0 0 0 0 0 0 0 0 0 0 0
50730 - 0 0 0 0 0 0 0 0 0 0 0 0
50731 - 0 0 0 0 0 0 0 0 0 0 0 0
50732 - 0 0 0 0 0 0 0 0 0 0 0 0
50733 - 0 0 0 0 0 0 0 0 0 0 0 0
50734 - 0 0 0 0 0 0 0 0 0 0 0 0
50735 - 6 6 6 18 18 18 46 46 46 90 90 90
50736 - 46 46 46 18 18 18 6 6 6 182 182 182
50737 -253 253 253 246 246 246 206 206 206 190 190 190
50738 -190 190 190 190 190 190 190 190 190 190 190 190
50739 -206 206 206 231 231 231 250 250 250 253 253 253
50740 -253 253 253 253 253 253 253 253 253 253 253 253
50741 -202 202 202 14 14 14 2 2 6 2 2 6
50742 - 2 2 6 2 2 6 2 2 6 2 2 6
50743 - 42 42 42 86 86 86 42 42 42 18 18 18
50744 - 6 6 6 0 0 0 0 0 0 0 0 0
50745 - 0 0 0 0 0 0 0 0 0 0 0 0
50746 - 0 0 0 0 0 0 0 0 0 0 0 0
50747 - 0 0 0 0 0 0 0 0 0 0 0 0
50748 - 0 0 0 0 0 0 0 0 0 0 0 0
50749 - 0 0 0 0 0 0 0 0 0 0 0 0
50750 - 0 0 0 0 0 0 0 0 0 0 0 0
50751 - 0 0 0 0 0 0 0 0 0 0 0 0
50752 - 0 0 0 0 0 0 0 0 0 0 0 0
50753 - 0 0 0 0 0 0 0 0 0 0 0 0
50754 - 0 0 0 0 0 0 0 0 0 6 6 6
50755 - 14 14 14 38 38 38 74 74 74 66 66 66
50756 - 2 2 6 6 6 6 90 90 90 250 250 250
50757 -253 253 253 253 253 253 238 238 238 198 198 198
50758 -190 190 190 190 190 190 195 195 195 221 221 221
50759 -246 246 246 253 253 253 253 253 253 253 253 253
50760 -253 253 253 253 253 253 253 253 253 253 253 253
50761 -253 253 253 82 82 82 2 2 6 2 2 6
50762 - 2 2 6 2 2 6 2 2 6 2 2 6
50763 - 2 2 6 78 78 78 70 70 70 34 34 34
50764 - 14 14 14 6 6 6 0 0 0 0 0 0
50765 - 0 0 0 0 0 0 0 0 0 0 0 0
50766 - 0 0 0 0 0 0 0 0 0 0 0 0
50767 - 0 0 0 0 0 0 0 0 0 0 0 0
50768 - 0 0 0 0 0 0 0 0 0 0 0 0
50769 - 0 0 0 0 0 0 0 0 0 0 0 0
50770 - 0 0 0 0 0 0 0 0 0 0 0 0
50771 - 0 0 0 0 0 0 0 0 0 0 0 0
50772 - 0 0 0 0 0 0 0 0 0 0 0 0
50773 - 0 0 0 0 0 0 0 0 0 0 0 0
50774 - 0 0 0 0 0 0 0 0 0 14 14 14
50775 - 34 34 34 66 66 66 78 78 78 6 6 6
50776 - 2 2 6 18 18 18 218 218 218 253 253 253
50777 -253 253 253 253 253 253 253 253 253 246 246 246
50778 -226 226 226 231 231 231 246 246 246 253 253 253
50779 -253 253 253 253 253 253 253 253 253 253 253 253
50780 -253 253 253 253 253 253 253 253 253 253 253 253
50781 -253 253 253 178 178 178 2 2 6 2 2 6
50782 - 2 2 6 2 2 6 2 2 6 2 2 6
50783 - 2 2 6 18 18 18 90 90 90 62 62 62
50784 - 30 30 30 10 10 10 0 0 0 0 0 0
50785 - 0 0 0 0 0 0 0 0 0 0 0 0
50786 - 0 0 0 0 0 0 0 0 0 0 0 0
50787 - 0 0 0 0 0 0 0 0 0 0 0 0
50788 - 0 0 0 0 0 0 0 0 0 0 0 0
50789 - 0 0 0 0 0 0 0 0 0 0 0 0
50790 - 0 0 0 0 0 0 0 0 0 0 0 0
50791 - 0 0 0 0 0 0 0 0 0 0 0 0
50792 - 0 0 0 0 0 0 0 0 0 0 0 0
50793 - 0 0 0 0 0 0 0 0 0 0 0 0
50794 - 0 0 0 0 0 0 10 10 10 26 26 26
50795 - 58 58 58 90 90 90 18 18 18 2 2 6
50796 - 2 2 6 110 110 110 253 253 253 253 253 253
50797 -253 253 253 253 253 253 253 253 253 253 253 253
50798 -250 250 250 253 253 253 253 253 253 253 253 253
50799 -253 253 253 253 253 253 253 253 253 253 253 253
50800 -253 253 253 253 253 253 253 253 253 253 253 253
50801 -253 253 253 231 231 231 18 18 18 2 2 6
50802 - 2 2 6 2 2 6 2 2 6 2 2 6
50803 - 2 2 6 2 2 6 18 18 18 94 94 94
50804 - 54 54 54 26 26 26 10 10 10 0 0 0
50805 - 0 0 0 0 0 0 0 0 0 0 0 0
50806 - 0 0 0 0 0 0 0 0 0 0 0 0
50807 - 0 0 0 0 0 0 0 0 0 0 0 0
50808 - 0 0 0 0 0 0 0 0 0 0 0 0
50809 - 0 0 0 0 0 0 0 0 0 0 0 0
50810 - 0 0 0 0 0 0 0 0 0 0 0 0
50811 - 0 0 0 0 0 0 0 0 0 0 0 0
50812 - 0 0 0 0 0 0 0 0 0 0 0 0
50813 - 0 0 0 0 0 0 0 0 0 0 0 0
50814 - 0 0 0 6 6 6 22 22 22 50 50 50
50815 - 90 90 90 26 26 26 2 2 6 2 2 6
50816 - 14 14 14 195 195 195 250 250 250 253 253 253
50817 -253 253 253 253 253 253 253 253 253 253 253 253
50818 -253 253 253 253 253 253 253 253 253 253 253 253
50819 -253 253 253 253 253 253 253 253 253 253 253 253
50820 -253 253 253 253 253 253 253 253 253 253 253 253
50821 -250 250 250 242 242 242 54 54 54 2 2 6
50822 - 2 2 6 2 2 6 2 2 6 2 2 6
50823 - 2 2 6 2 2 6 2 2 6 38 38 38
50824 - 86 86 86 50 50 50 22 22 22 6 6 6
50825 - 0 0 0 0 0 0 0 0 0 0 0 0
50826 - 0 0 0 0 0 0 0 0 0 0 0 0
50827 - 0 0 0 0 0 0 0 0 0 0 0 0
50828 - 0 0 0 0 0 0 0 0 0 0 0 0
50829 - 0 0 0 0 0 0 0 0 0 0 0 0
50830 - 0 0 0 0 0 0 0 0 0 0 0 0
50831 - 0 0 0 0 0 0 0 0 0 0 0 0
50832 - 0 0 0 0 0 0 0 0 0 0 0 0
50833 - 0 0 0 0 0 0 0 0 0 0 0 0
50834 - 6 6 6 14 14 14 38 38 38 82 82 82
50835 - 34 34 34 2 2 6 2 2 6 2 2 6
50836 - 42 42 42 195 195 195 246 246 246 253 253 253
50837 -253 253 253 253 253 253 253 253 253 250 250 250
50838 -242 242 242 242 242 242 250 250 250 253 253 253
50839 -253 253 253 253 253 253 253 253 253 253 253 253
50840 -253 253 253 250 250 250 246 246 246 238 238 238
50841 -226 226 226 231 231 231 101 101 101 6 6 6
50842 - 2 2 6 2 2 6 2 2 6 2 2 6
50843 - 2 2 6 2 2 6 2 2 6 2 2 6
50844 - 38 38 38 82 82 82 42 42 42 14 14 14
50845 - 6 6 6 0 0 0 0 0 0 0 0 0
50846 - 0 0 0 0 0 0 0 0 0 0 0 0
50847 - 0 0 0 0 0 0 0 0 0 0 0 0
50848 - 0 0 0 0 0 0 0 0 0 0 0 0
50849 - 0 0 0 0 0 0 0 0 0 0 0 0
50850 - 0 0 0 0 0 0 0 0 0 0 0 0
50851 - 0 0 0 0 0 0 0 0 0 0 0 0
50852 - 0 0 0 0 0 0 0 0 0 0 0 0
50853 - 0 0 0 0 0 0 0 0 0 0 0 0
50854 - 10 10 10 26 26 26 62 62 62 66 66 66
50855 - 2 2 6 2 2 6 2 2 6 6 6 6
50856 - 70 70 70 170 170 170 206 206 206 234 234 234
50857 -246 246 246 250 250 250 250 250 250 238 238 238
50858 -226 226 226 231 231 231 238 238 238 250 250 250
50859 -250 250 250 250 250 250 246 246 246 231 231 231
50860 -214 214 214 206 206 206 202 202 202 202 202 202
50861 -198 198 198 202 202 202 182 182 182 18 18 18
50862 - 2 2 6 2 2 6 2 2 6 2 2 6
50863 - 2 2 6 2 2 6 2 2 6 2 2 6
50864 - 2 2 6 62 62 62 66 66 66 30 30 30
50865 - 10 10 10 0 0 0 0 0 0 0 0 0
50866 - 0 0 0 0 0 0 0 0 0 0 0 0
50867 - 0 0 0 0 0 0 0 0 0 0 0 0
50868 - 0 0 0 0 0 0 0 0 0 0 0 0
50869 - 0 0 0 0 0 0 0 0 0 0 0 0
50870 - 0 0 0 0 0 0 0 0 0 0 0 0
50871 - 0 0 0 0 0 0 0 0 0 0 0 0
50872 - 0 0 0 0 0 0 0 0 0 0 0 0
50873 - 0 0 0 0 0 0 0 0 0 0 0 0
50874 - 14 14 14 42 42 42 82 82 82 18 18 18
50875 - 2 2 6 2 2 6 2 2 6 10 10 10
50876 - 94 94 94 182 182 182 218 218 218 242 242 242
50877 -250 250 250 253 253 253 253 253 253 250 250 250
50878 -234 234 234 253 253 253 253 253 253 253 253 253
50879 -253 253 253 253 253 253 253 253 253 246 246 246
50880 -238 238 238 226 226 226 210 210 210 202 202 202
50881 -195 195 195 195 195 195 210 210 210 158 158 158
50882 - 6 6 6 14 14 14 50 50 50 14 14 14
50883 - 2 2 6 2 2 6 2 2 6 2 2 6
50884 - 2 2 6 6 6 6 86 86 86 46 46 46
50885 - 18 18 18 6 6 6 0 0 0 0 0 0
50886 - 0 0 0 0 0 0 0 0 0 0 0 0
50887 - 0 0 0 0 0 0 0 0 0 0 0 0
50888 - 0 0 0 0 0 0 0 0 0 0 0 0
50889 - 0 0 0 0 0 0 0 0 0 0 0 0
50890 - 0 0 0 0 0 0 0 0 0 0 0 0
50891 - 0 0 0 0 0 0 0 0 0 0 0 0
50892 - 0 0 0 0 0 0 0 0 0 0 0 0
50893 - 0 0 0 0 0 0 0 0 0 6 6 6
50894 - 22 22 22 54 54 54 70 70 70 2 2 6
50895 - 2 2 6 10 10 10 2 2 6 22 22 22
50896 -166 166 166 231 231 231 250 250 250 253 253 253
50897 -253 253 253 253 253 253 253 253 253 250 250 250
50898 -242 242 242 253 253 253 253 253 253 253 253 253
50899 -253 253 253 253 253 253 253 253 253 253 253 253
50900 -253 253 253 253 253 253 253 253 253 246 246 246
50901 -231 231 231 206 206 206 198 198 198 226 226 226
50902 - 94 94 94 2 2 6 6 6 6 38 38 38
50903 - 30 30 30 2 2 6 2 2 6 2 2 6
50904 - 2 2 6 2 2 6 62 62 62 66 66 66
50905 - 26 26 26 10 10 10 0 0 0 0 0 0
50906 - 0 0 0 0 0 0 0 0 0 0 0 0
50907 - 0 0 0 0 0 0 0 0 0 0 0 0
50908 - 0 0 0 0 0 0 0 0 0 0 0 0
50909 - 0 0 0 0 0 0 0 0 0 0 0 0
50910 - 0 0 0 0 0 0 0 0 0 0 0 0
50911 - 0 0 0 0 0 0 0 0 0 0 0 0
50912 - 0 0 0 0 0 0 0 0 0 0 0 0
50913 - 0 0 0 0 0 0 0 0 0 10 10 10
50914 - 30 30 30 74 74 74 50 50 50 2 2 6
50915 - 26 26 26 26 26 26 2 2 6 106 106 106
50916 -238 238 238 253 253 253 253 253 253 253 253 253
50917 -253 253 253 253 253 253 253 253 253 253 253 253
50918 -253 253 253 253 253 253 253 253 253 253 253 253
50919 -253 253 253 253 253 253 253 253 253 253 253 253
50920 -253 253 253 253 253 253 253 253 253 253 253 253
50921 -253 253 253 246 246 246 218 218 218 202 202 202
50922 -210 210 210 14 14 14 2 2 6 2 2 6
50923 - 30 30 30 22 22 22 2 2 6 2 2 6
50924 - 2 2 6 2 2 6 18 18 18 86 86 86
50925 - 42 42 42 14 14 14 0 0 0 0 0 0
50926 - 0 0 0 0 0 0 0 0 0 0 0 0
50927 - 0 0 0 0 0 0 0 0 0 0 0 0
50928 - 0 0 0 0 0 0 0 0 0 0 0 0
50929 - 0 0 0 0 0 0 0 0 0 0 0 0
50930 - 0 0 0 0 0 0 0 0 0 0 0 0
50931 - 0 0 0 0 0 0 0 0 0 0 0 0
50932 - 0 0 0 0 0 0 0 0 0 0 0 0
50933 - 0 0 0 0 0 0 0 0 0 14 14 14
50934 - 42 42 42 90 90 90 22 22 22 2 2 6
50935 - 42 42 42 2 2 6 18 18 18 218 218 218
50936 -253 253 253 253 253 253 253 253 253 253 253 253
50937 -253 253 253 253 253 253 253 253 253 253 253 253
50938 -253 253 253 253 253 253 253 253 253 253 253 253
50939 -253 253 253 253 253 253 253 253 253 253 253 253
50940 -253 253 253 253 253 253 253 253 253 253 253 253
50941 -253 253 253 253 253 253 250 250 250 221 221 221
50942 -218 218 218 101 101 101 2 2 6 14 14 14
50943 - 18 18 18 38 38 38 10 10 10 2 2 6
50944 - 2 2 6 2 2 6 2 2 6 78 78 78
50945 - 58 58 58 22 22 22 6 6 6 0 0 0
50946 - 0 0 0 0 0 0 0 0 0 0 0 0
50947 - 0 0 0 0 0 0 0 0 0 0 0 0
50948 - 0 0 0 0 0 0 0 0 0 0 0 0
50949 - 0 0 0 0 0 0 0 0 0 0 0 0
50950 - 0 0 0 0 0 0 0 0 0 0 0 0
50951 - 0 0 0 0 0 0 0 0 0 0 0 0
50952 - 0 0 0 0 0 0 0 0 0 0 0 0
50953 - 0 0 0 0 0 0 6 6 6 18 18 18
50954 - 54 54 54 82 82 82 2 2 6 26 26 26
50955 - 22 22 22 2 2 6 123 123 123 253 253 253
50956 -253 253 253 253 253 253 253 253 253 253 253 253
50957 -253 253 253 253 253 253 253 253 253 253 253 253
50958 -253 253 253 253 253 253 253 253 253 253 253 253
50959 -253 253 253 253 253 253 253 253 253 253 253 253
50960 -253 253 253 253 253 253 253 253 253 253 253 253
50961 -253 253 253 253 253 253 253 253 253 250 250 250
50962 -238 238 238 198 198 198 6 6 6 38 38 38
50963 - 58 58 58 26 26 26 38 38 38 2 2 6
50964 - 2 2 6 2 2 6 2 2 6 46 46 46
50965 - 78 78 78 30 30 30 10 10 10 0 0 0
50966 - 0 0 0 0 0 0 0 0 0 0 0 0
50967 - 0 0 0 0 0 0 0 0 0 0 0 0
50968 - 0 0 0 0 0 0 0 0 0 0 0 0
50969 - 0 0 0 0 0 0 0 0 0 0 0 0
50970 - 0 0 0 0 0 0 0 0 0 0 0 0
50971 - 0 0 0 0 0 0 0 0 0 0 0 0
50972 - 0 0 0 0 0 0 0 0 0 0 0 0
50973 - 0 0 0 0 0 0 10 10 10 30 30 30
50974 - 74 74 74 58 58 58 2 2 6 42 42 42
50975 - 2 2 6 22 22 22 231 231 231 253 253 253
50976 -253 253 253 253 253 253 253 253 253 253 253 253
50977 -253 253 253 253 253 253 253 253 253 250 250 250
50978 -253 253 253 253 253 253 253 253 253 253 253 253
50979 -253 253 253 253 253 253 253 253 253 253 253 253
50980 -253 253 253 253 253 253 253 253 253 253 253 253
50981 -253 253 253 253 253 253 253 253 253 253 253 253
50982 -253 253 253 246 246 246 46 46 46 38 38 38
50983 - 42 42 42 14 14 14 38 38 38 14 14 14
50984 - 2 2 6 2 2 6 2 2 6 6 6 6
50985 - 86 86 86 46 46 46 14 14 14 0 0 0
50986 - 0 0 0 0 0 0 0 0 0 0 0 0
50987 - 0 0 0 0 0 0 0 0 0 0 0 0
50988 - 0 0 0 0 0 0 0 0 0 0 0 0
50989 - 0 0 0 0 0 0 0 0 0 0 0 0
50990 - 0 0 0 0 0 0 0 0 0 0 0 0
50991 - 0 0 0 0 0 0 0 0 0 0 0 0
50992 - 0 0 0 0 0 0 0 0 0 0 0 0
50993 - 0 0 0 6 6 6 14 14 14 42 42 42
50994 - 90 90 90 18 18 18 18 18 18 26 26 26
50995 - 2 2 6 116 116 116 253 253 253 253 253 253
50996 -253 253 253 253 253 253 253 253 253 253 253 253
50997 -253 253 253 253 253 253 250 250 250 238 238 238
50998 -253 253 253 253 253 253 253 253 253 253 253 253
50999 -253 253 253 253 253 253 253 253 253 253 253 253
51000 -253 253 253 253 253 253 253 253 253 253 253 253
51001 -253 253 253 253 253 253 253 253 253 253 253 253
51002 -253 253 253 253 253 253 94 94 94 6 6 6
51003 - 2 2 6 2 2 6 10 10 10 34 34 34
51004 - 2 2 6 2 2 6 2 2 6 2 2 6
51005 - 74 74 74 58 58 58 22 22 22 6 6 6
51006 - 0 0 0 0 0 0 0 0 0 0 0 0
51007 - 0 0 0 0 0 0 0 0 0 0 0 0
51008 - 0 0 0 0 0 0 0 0 0 0 0 0
51009 - 0 0 0 0 0 0 0 0 0 0 0 0
51010 - 0 0 0 0 0 0 0 0 0 0 0 0
51011 - 0 0 0 0 0 0 0 0 0 0 0 0
51012 - 0 0 0 0 0 0 0 0 0 0 0 0
51013 - 0 0 0 10 10 10 26 26 26 66 66 66
51014 - 82 82 82 2 2 6 38 38 38 6 6 6
51015 - 14 14 14 210 210 210 253 253 253 253 253 253
51016 -253 253 253 253 253 253 253 253 253 253 253 253
51017 -253 253 253 253 253 253 246 246 246 242 242 242
51018 -253 253 253 253 253 253 253 253 253 253 253 253
51019 -253 253 253 253 253 253 253 253 253 253 253 253
51020 -253 253 253 253 253 253 253 253 253 253 253 253
51021 -253 253 253 253 253 253 253 253 253 253 253 253
51022 -253 253 253 253 253 253 144 144 144 2 2 6
51023 - 2 2 6 2 2 6 2 2 6 46 46 46
51024 - 2 2 6 2 2 6 2 2 6 2 2 6
51025 - 42 42 42 74 74 74 30 30 30 10 10 10
51026 - 0 0 0 0 0 0 0 0 0 0 0 0
51027 - 0 0 0 0 0 0 0 0 0 0 0 0
51028 - 0 0 0 0 0 0 0 0 0 0 0 0
51029 - 0 0 0 0 0 0 0 0 0 0 0 0
51030 - 0 0 0 0 0 0 0 0 0 0 0 0
51031 - 0 0 0 0 0 0 0 0 0 0 0 0
51032 - 0 0 0 0 0 0 0 0 0 0 0 0
51033 - 6 6 6 14 14 14 42 42 42 90 90 90
51034 - 26 26 26 6 6 6 42 42 42 2 2 6
51035 - 74 74 74 250 250 250 253 253 253 253 253 253
51036 -253 253 253 253 253 253 253 253 253 253 253 253
51037 -253 253 253 253 253 253 242 242 242 242 242 242
51038 -253 253 253 253 253 253 253 253 253 253 253 253
51039 -253 253 253 253 253 253 253 253 253 253 253 253
51040 -253 253 253 253 253 253 253 253 253 253 253 253
51041 -253 253 253 253 253 253 253 253 253 253 253 253
51042 -253 253 253 253 253 253 182 182 182 2 2 6
51043 - 2 2 6 2 2 6 2 2 6 46 46 46
51044 - 2 2 6 2 2 6 2 2 6 2 2 6
51045 - 10 10 10 86 86 86 38 38 38 10 10 10
51046 - 0 0 0 0 0 0 0 0 0 0 0 0
51047 - 0 0 0 0 0 0 0 0 0 0 0 0
51048 - 0 0 0 0 0 0 0 0 0 0 0 0
51049 - 0 0 0 0 0 0 0 0 0 0 0 0
51050 - 0 0 0 0 0 0 0 0 0 0 0 0
51051 - 0 0 0 0 0 0 0 0 0 0 0 0
51052 - 0 0 0 0 0 0 0 0 0 0 0 0
51053 - 10 10 10 26 26 26 66 66 66 82 82 82
51054 - 2 2 6 22 22 22 18 18 18 2 2 6
51055 -149 149 149 253 253 253 253 253 253 253 253 253
51056 -253 253 253 253 253 253 253 253 253 253 253 253
51057 -253 253 253 253 253 253 234 234 234 242 242 242
51058 -253 253 253 253 253 253 253 253 253 253 253 253
51059 -253 253 253 253 253 253 253 253 253 253 253 253
51060 -253 253 253 253 253 253 253 253 253 253 253 253
51061 -253 253 253 253 253 253 253 253 253 253 253 253
51062 -253 253 253 253 253 253 206 206 206 2 2 6
51063 - 2 2 6 2 2 6 2 2 6 38 38 38
51064 - 2 2 6 2 2 6 2 2 6 2 2 6
51065 - 6 6 6 86 86 86 46 46 46 14 14 14
51066 - 0 0 0 0 0 0 0 0 0 0 0 0
51067 - 0 0 0 0 0 0 0 0 0 0 0 0
51068 - 0 0 0 0 0 0 0 0 0 0 0 0
51069 - 0 0 0 0 0 0 0 0 0 0 0 0
51070 - 0 0 0 0 0 0 0 0 0 0 0 0
51071 - 0 0 0 0 0 0 0 0 0 0 0 0
51072 - 0 0 0 0 0 0 0 0 0 6 6 6
51073 - 18 18 18 46 46 46 86 86 86 18 18 18
51074 - 2 2 6 34 34 34 10 10 10 6 6 6
51075 -210 210 210 253 253 253 253 253 253 253 253 253
51076 -253 253 253 253 253 253 253 253 253 253 253 253
51077 -253 253 253 253 253 253 234 234 234 242 242 242
51078 -253 253 253 253 253 253 253 253 253 253 253 253
51079 -253 253 253 253 253 253 253 253 253 253 253 253
51080 -253 253 253 253 253 253 253 253 253 253 253 253
51081 -253 253 253 253 253 253 253 253 253 253 253 253
51082 -253 253 253 253 253 253 221 221 221 6 6 6
51083 - 2 2 6 2 2 6 6 6 6 30 30 30
51084 - 2 2 6 2 2 6 2 2 6 2 2 6
51085 - 2 2 6 82 82 82 54 54 54 18 18 18
51086 - 6 6 6 0 0 0 0 0 0 0 0 0
51087 - 0 0 0 0 0 0 0 0 0 0 0 0
51088 - 0 0 0 0 0 0 0 0 0 0 0 0
51089 - 0 0 0 0 0 0 0 0 0 0 0 0
51090 - 0 0 0 0 0 0 0 0 0 0 0 0
51091 - 0 0 0 0 0 0 0 0 0 0 0 0
51092 - 0 0 0 0 0 0 0 0 0 10 10 10
51093 - 26 26 26 66 66 66 62 62 62 2 2 6
51094 - 2 2 6 38 38 38 10 10 10 26 26 26
51095 -238 238 238 253 253 253 253 253 253 253 253 253
51096 -253 253 253 253 253 253 253 253 253 253 253 253
51097 -253 253 253 253 253 253 231 231 231 238 238 238
51098 -253 253 253 253 253 253 253 253 253 253 253 253
51099 -253 253 253 253 253 253 253 253 253 253 253 253
51100 -253 253 253 253 253 253 253 253 253 253 253 253
51101 -253 253 253 253 253 253 253 253 253 253 253 253
51102 -253 253 253 253 253 253 231 231 231 6 6 6
51103 - 2 2 6 2 2 6 10 10 10 30 30 30
51104 - 2 2 6 2 2 6 2 2 6 2 2 6
51105 - 2 2 6 66 66 66 58 58 58 22 22 22
51106 - 6 6 6 0 0 0 0 0 0 0 0 0
51107 - 0 0 0 0 0 0 0 0 0 0 0 0
51108 - 0 0 0 0 0 0 0 0 0 0 0 0
51109 - 0 0 0 0 0 0 0 0 0 0 0 0
51110 - 0 0 0 0 0 0 0 0 0 0 0 0
51111 - 0 0 0 0 0 0 0 0 0 0 0 0
51112 - 0 0 0 0 0 0 0 0 0 10 10 10
51113 - 38 38 38 78 78 78 6 6 6 2 2 6
51114 - 2 2 6 46 46 46 14 14 14 42 42 42
51115 -246 246 246 253 253 253 253 253 253 253 253 253
51116 -253 253 253 253 253 253 253 253 253 253 253 253
51117 -253 253 253 253 253 253 231 231 231 242 242 242
51118 -253 253 253 253 253 253 253 253 253 253 253 253
51119 -253 253 253 253 253 253 253 253 253 253 253 253
51120 -253 253 253 253 253 253 253 253 253 253 253 253
51121 -253 253 253 253 253 253 253 253 253 253 253 253
51122 -253 253 253 253 253 253 234 234 234 10 10 10
51123 - 2 2 6 2 2 6 22 22 22 14 14 14
51124 - 2 2 6 2 2 6 2 2 6 2 2 6
51125 - 2 2 6 66 66 66 62 62 62 22 22 22
51126 - 6 6 6 0 0 0 0 0 0 0 0 0
51127 - 0 0 0 0 0 0 0 0 0 0 0 0
51128 - 0 0 0 0 0 0 0 0 0 0 0 0
51129 - 0 0 0 0 0 0 0 0 0 0 0 0
51130 - 0 0 0 0 0 0 0 0 0 0 0 0
51131 - 0 0 0 0 0 0 0 0 0 0 0 0
51132 - 0 0 0 0 0 0 6 6 6 18 18 18
51133 - 50 50 50 74 74 74 2 2 6 2 2 6
51134 - 14 14 14 70 70 70 34 34 34 62 62 62
51135 -250 250 250 253 253 253 253 253 253 253 253 253
51136 -253 253 253 253 253 253 253 253 253 253 253 253
51137 -253 253 253 253 253 253 231 231 231 246 246 246
51138 -253 253 253 253 253 253 253 253 253 253 253 253
51139 -253 253 253 253 253 253 253 253 253 253 253 253
51140 -253 253 253 253 253 253 253 253 253 253 253 253
51141 -253 253 253 253 253 253 253 253 253 253 253 253
51142 -253 253 253 253 253 253 234 234 234 14 14 14
51143 - 2 2 6 2 2 6 30 30 30 2 2 6
51144 - 2 2 6 2 2 6 2 2 6 2 2 6
51145 - 2 2 6 66 66 66 62 62 62 22 22 22
51146 - 6 6 6 0 0 0 0 0 0 0 0 0
51147 - 0 0 0 0 0 0 0 0 0 0 0 0
51148 - 0 0 0 0 0 0 0 0 0 0 0 0
51149 - 0 0 0 0 0 0 0 0 0 0 0 0
51150 - 0 0 0 0 0 0 0 0 0 0 0 0
51151 - 0 0 0 0 0 0 0 0 0 0 0 0
51152 - 0 0 0 0 0 0 6 6 6 18 18 18
51153 - 54 54 54 62 62 62 2 2 6 2 2 6
51154 - 2 2 6 30 30 30 46 46 46 70 70 70
51155 -250 250 250 253 253 253 253 253 253 253 253 253
51156 -253 253 253 253 253 253 253 253 253 253 253 253
51157 -253 253 253 253 253 253 231 231 231 246 246 246
51158 -253 253 253 253 253 253 253 253 253 253 253 253
51159 -253 253 253 253 253 253 253 253 253 253 253 253
51160 -253 253 253 253 253 253 253 253 253 253 253 253
51161 -253 253 253 253 253 253 253 253 253 253 253 253
51162 -253 253 253 253 253 253 226 226 226 10 10 10
51163 - 2 2 6 6 6 6 30 30 30 2 2 6
51164 - 2 2 6 2 2 6 2 2 6 2 2 6
51165 - 2 2 6 66 66 66 58 58 58 22 22 22
51166 - 6 6 6 0 0 0 0 0 0 0 0 0
51167 - 0 0 0 0 0 0 0 0 0 0 0 0
51168 - 0 0 0 0 0 0 0 0 0 0 0 0
51169 - 0 0 0 0 0 0 0 0 0 0 0 0
51170 - 0 0 0 0 0 0 0 0 0 0 0 0
51171 - 0 0 0 0 0 0 0 0 0 0 0 0
51172 - 0 0 0 0 0 0 6 6 6 22 22 22
51173 - 58 58 58 62 62 62 2 2 6 2 2 6
51174 - 2 2 6 2 2 6 30 30 30 78 78 78
51175 -250 250 250 253 253 253 253 253 253 253 253 253
51176 -253 253 253 253 253 253 253 253 253 253 253 253
51177 -253 253 253 253 253 253 231 231 231 246 246 246
51178 -253 253 253 253 253 253 253 253 253 253 253 253
51179 -253 253 253 253 253 253 253 253 253 253 253 253
51180 -253 253 253 253 253 253 253 253 253 253 253 253
51181 -253 253 253 253 253 253 253 253 253 253 253 253
51182 -253 253 253 253 253 253 206 206 206 2 2 6
51183 - 22 22 22 34 34 34 18 14 6 22 22 22
51184 - 26 26 26 18 18 18 6 6 6 2 2 6
51185 - 2 2 6 82 82 82 54 54 54 18 18 18
51186 - 6 6 6 0 0 0 0 0 0 0 0 0
51187 - 0 0 0 0 0 0 0 0 0 0 0 0
51188 - 0 0 0 0 0 0 0 0 0 0 0 0
51189 - 0 0 0 0 0 0 0 0 0 0 0 0
51190 - 0 0 0 0 0 0 0 0 0 0 0 0
51191 - 0 0 0 0 0 0 0 0 0 0 0 0
51192 - 0 0 0 0 0 0 6 6 6 26 26 26
51193 - 62 62 62 106 106 106 74 54 14 185 133 11
51194 -210 162 10 121 92 8 6 6 6 62 62 62
51195 -238 238 238 253 253 253 253 253 253 253 253 253
51196 -253 253 253 253 253 253 253 253 253 253 253 253
51197 -253 253 253 253 253 253 231 231 231 246 246 246
51198 -253 253 253 253 253 253 253 253 253 253 253 253
51199 -253 253 253 253 253 253 253 253 253 253 253 253
51200 -253 253 253 253 253 253 253 253 253 253 253 253
51201 -253 253 253 253 253 253 253 253 253 253 253 253
51202 -253 253 253 253 253 253 158 158 158 18 18 18
51203 - 14 14 14 2 2 6 2 2 6 2 2 6
51204 - 6 6 6 18 18 18 66 66 66 38 38 38
51205 - 6 6 6 94 94 94 50 50 50 18 18 18
51206 - 6 6 6 0 0 0 0 0 0 0 0 0
51207 - 0 0 0 0 0 0 0 0 0 0 0 0
51208 - 0 0 0 0 0 0 0 0 0 0 0 0
51209 - 0 0 0 0 0 0 0 0 0 0 0 0
51210 - 0 0 0 0 0 0 0 0 0 0 0 0
51211 - 0 0 0 0 0 0 0 0 0 6 6 6
51212 - 10 10 10 10 10 10 18 18 18 38 38 38
51213 - 78 78 78 142 134 106 216 158 10 242 186 14
51214 -246 190 14 246 190 14 156 118 10 10 10 10
51215 - 90 90 90 238 238 238 253 253 253 253 253 253
51216 -253 253 253 253 253 253 253 253 253 253 253 253
51217 -253 253 253 253 253 253 231 231 231 250 250 250
51218 -253 253 253 253 253 253 253 253 253 253 253 253
51219 -253 253 253 253 253 253 253 253 253 253 253 253
51220 -253 253 253 253 253 253 253 253 253 253 253 253
51221 -253 253 253 253 253 253 253 253 253 246 230 190
51222 -238 204 91 238 204 91 181 142 44 37 26 9
51223 - 2 2 6 2 2 6 2 2 6 2 2 6
51224 - 2 2 6 2 2 6 38 38 38 46 46 46
51225 - 26 26 26 106 106 106 54 54 54 18 18 18
51226 - 6 6 6 0 0 0 0 0 0 0 0 0
51227 - 0 0 0 0 0 0 0 0 0 0 0 0
51228 - 0 0 0 0 0 0 0 0 0 0 0 0
51229 - 0 0 0 0 0 0 0 0 0 0 0 0
51230 - 0 0 0 0 0 0 0 0 0 0 0 0
51231 - 0 0 0 6 6 6 14 14 14 22 22 22
51232 - 30 30 30 38 38 38 50 50 50 70 70 70
51233 -106 106 106 190 142 34 226 170 11 242 186 14
51234 -246 190 14 246 190 14 246 190 14 154 114 10
51235 - 6 6 6 74 74 74 226 226 226 253 253 253
51236 -253 253 253 253 253 253 253 253 253 253 253 253
51237 -253 253 253 253 253 253 231 231 231 250 250 250
51238 -253 253 253 253 253 253 253 253 253 253 253 253
51239 -253 253 253 253 253 253 253 253 253 253 253 253
51240 -253 253 253 253 253 253 253 253 253 253 253 253
51241 -253 253 253 253 253 253 253 253 253 228 184 62
51242 -241 196 14 241 208 19 232 195 16 38 30 10
51243 - 2 2 6 2 2 6 2 2 6 2 2 6
51244 - 2 2 6 6 6 6 30 30 30 26 26 26
51245 -203 166 17 154 142 90 66 66 66 26 26 26
51246 - 6 6 6 0 0 0 0 0 0 0 0 0
51247 - 0 0 0 0 0 0 0 0 0 0 0 0
51248 - 0 0 0 0 0 0 0 0 0 0 0 0
51249 - 0 0 0 0 0 0 0 0 0 0 0 0
51250 - 0 0 0 0 0 0 0 0 0 0 0 0
51251 - 6 6 6 18 18 18 38 38 38 58 58 58
51252 - 78 78 78 86 86 86 101 101 101 123 123 123
51253 -175 146 61 210 150 10 234 174 13 246 186 14
51254 -246 190 14 246 190 14 246 190 14 238 190 10
51255 -102 78 10 2 2 6 46 46 46 198 198 198
51256 -253 253 253 253 253 253 253 253 253 253 253 253
51257 -253 253 253 253 253 253 234 234 234 242 242 242
51258 -253 253 253 253 253 253 253 253 253 253 253 253
51259 -253 253 253 253 253 253 253 253 253 253 253 253
51260 -253 253 253 253 253 253 253 253 253 253 253 253
51261 -253 253 253 253 253 253 253 253 253 224 178 62
51262 -242 186 14 241 196 14 210 166 10 22 18 6
51263 - 2 2 6 2 2 6 2 2 6 2 2 6
51264 - 2 2 6 2 2 6 6 6 6 121 92 8
51265 -238 202 15 232 195 16 82 82 82 34 34 34
51266 - 10 10 10 0 0 0 0 0 0 0 0 0
51267 - 0 0 0 0 0 0 0 0 0 0 0 0
51268 - 0 0 0 0 0 0 0 0 0 0 0 0
51269 - 0 0 0 0 0 0 0 0 0 0 0 0
51270 - 0 0 0 0 0 0 0 0 0 0 0 0
51271 - 14 14 14 38 38 38 70 70 70 154 122 46
51272 -190 142 34 200 144 11 197 138 11 197 138 11
51273 -213 154 11 226 170 11 242 186 14 246 190 14
51274 -246 190 14 246 190 14 246 190 14 246 190 14
51275 -225 175 15 46 32 6 2 2 6 22 22 22
51276 -158 158 158 250 250 250 253 253 253 253 253 253
51277 -253 253 253 253 253 253 253 253 253 253 253 253
51278 -253 253 253 253 253 253 253 253 253 253 253 253
51279 -253 253 253 253 253 253 253 253 253 253 253 253
51280 -253 253 253 253 253 253 253 253 253 253 253 253
51281 -253 253 253 250 250 250 242 242 242 224 178 62
51282 -239 182 13 236 186 11 213 154 11 46 32 6
51283 - 2 2 6 2 2 6 2 2 6 2 2 6
51284 - 2 2 6 2 2 6 61 42 6 225 175 15
51285 -238 190 10 236 186 11 112 100 78 42 42 42
51286 - 14 14 14 0 0 0 0 0 0 0 0 0
51287 - 0 0 0 0 0 0 0 0 0 0 0 0
51288 - 0 0 0 0 0 0 0 0 0 0 0 0
51289 - 0 0 0 0 0 0 0 0 0 0 0 0
51290 - 0 0 0 0 0 0 0 0 0 6 6 6
51291 - 22 22 22 54 54 54 154 122 46 213 154 11
51292 -226 170 11 230 174 11 226 170 11 226 170 11
51293 -236 178 12 242 186 14 246 190 14 246 190 14
51294 -246 190 14 246 190 14 246 190 14 246 190 14
51295 -241 196 14 184 144 12 10 10 10 2 2 6
51296 - 6 6 6 116 116 116 242 242 242 253 253 253
51297 -253 253 253 253 253 253 253 253 253 253 253 253
51298 -253 253 253 253 253 253 253 253 253 253 253 253
51299 -253 253 253 253 253 253 253 253 253 253 253 253
51300 -253 253 253 253 253 253 253 253 253 253 253 253
51301 -253 253 253 231 231 231 198 198 198 214 170 54
51302 -236 178 12 236 178 12 210 150 10 137 92 6
51303 - 18 14 6 2 2 6 2 2 6 2 2 6
51304 - 6 6 6 70 47 6 200 144 11 236 178 12
51305 -239 182 13 239 182 13 124 112 88 58 58 58
51306 - 22 22 22 6 6 6 0 0 0 0 0 0
51307 - 0 0 0 0 0 0 0 0 0 0 0 0
51308 - 0 0 0 0 0 0 0 0 0 0 0 0
51309 - 0 0 0 0 0 0 0 0 0 0 0 0
51310 - 0 0 0 0 0 0 0 0 0 10 10 10
51311 - 30 30 30 70 70 70 180 133 36 226 170 11
51312 -239 182 13 242 186 14 242 186 14 246 186 14
51313 -246 190 14 246 190 14 246 190 14 246 190 14
51314 -246 190 14 246 190 14 246 190 14 246 190 14
51315 -246 190 14 232 195 16 98 70 6 2 2 6
51316 - 2 2 6 2 2 6 66 66 66 221 221 221
51317 -253 253 253 253 253 253 253 253 253 253 253 253
51318 -253 253 253 253 253 253 253 253 253 253 253 253
51319 -253 253 253 253 253 253 253 253 253 253 253 253
51320 -253 253 253 253 253 253 253 253 253 253 253 253
51321 -253 253 253 206 206 206 198 198 198 214 166 58
51322 -230 174 11 230 174 11 216 158 10 192 133 9
51323 -163 110 8 116 81 8 102 78 10 116 81 8
51324 -167 114 7 197 138 11 226 170 11 239 182 13
51325 -242 186 14 242 186 14 162 146 94 78 78 78
51326 - 34 34 34 14 14 14 6 6 6 0 0 0
51327 - 0 0 0 0 0 0 0 0 0 0 0 0
51328 - 0 0 0 0 0 0 0 0 0 0 0 0
51329 - 0 0 0 0 0 0 0 0 0 0 0 0
51330 - 0 0 0 0 0 0 0 0 0 6 6 6
51331 - 30 30 30 78 78 78 190 142 34 226 170 11
51332 -239 182 13 246 190 14 246 190 14 246 190 14
51333 -246 190 14 246 190 14 246 190 14 246 190 14
51334 -246 190 14 246 190 14 246 190 14 246 190 14
51335 -246 190 14 241 196 14 203 166 17 22 18 6
51336 - 2 2 6 2 2 6 2 2 6 38 38 38
51337 -218 218 218 253 253 253 253 253 253 253 253 253
51338 -253 253 253 253 253 253 253 253 253 253 253 253
51339 -253 253 253 253 253 253 253 253 253 253 253 253
51340 -253 253 253 253 253 253 253 253 253 253 253 253
51341 -250 250 250 206 206 206 198 198 198 202 162 69
51342 -226 170 11 236 178 12 224 166 10 210 150 10
51343 -200 144 11 197 138 11 192 133 9 197 138 11
51344 -210 150 10 226 170 11 242 186 14 246 190 14
51345 -246 190 14 246 186 14 225 175 15 124 112 88
51346 - 62 62 62 30 30 30 14 14 14 6 6 6
51347 - 0 0 0 0 0 0 0 0 0 0 0 0
51348 - 0 0 0 0 0 0 0 0 0 0 0 0
51349 - 0 0 0 0 0 0 0 0 0 0 0 0
51350 - 0 0 0 0 0 0 0 0 0 10 10 10
51351 - 30 30 30 78 78 78 174 135 50 224 166 10
51352 -239 182 13 246 190 14 246 190 14 246 190 14
51353 -246 190 14 246 190 14 246 190 14 246 190 14
51354 -246 190 14 246 190 14 246 190 14 246 190 14
51355 -246 190 14 246 190 14 241 196 14 139 102 15
51356 - 2 2 6 2 2 6 2 2 6 2 2 6
51357 - 78 78 78 250 250 250 253 253 253 253 253 253
51358 -253 253 253 253 253 253 253 253 253 253 253 253
51359 -253 253 253 253 253 253 253 253 253 253 253 253
51360 -253 253 253 253 253 253 253 253 253 253 253 253
51361 -250 250 250 214 214 214 198 198 198 190 150 46
51362 -219 162 10 236 178 12 234 174 13 224 166 10
51363 -216 158 10 213 154 11 213 154 11 216 158 10
51364 -226 170 11 239 182 13 246 190 14 246 190 14
51365 -246 190 14 246 190 14 242 186 14 206 162 42
51366 -101 101 101 58 58 58 30 30 30 14 14 14
51367 - 6 6 6 0 0 0 0 0 0 0 0 0
51368 - 0 0 0 0 0 0 0 0 0 0 0 0
51369 - 0 0 0 0 0 0 0 0 0 0 0 0
51370 - 0 0 0 0 0 0 0 0 0 10 10 10
51371 - 30 30 30 74 74 74 174 135 50 216 158 10
51372 -236 178 12 246 190 14 246 190 14 246 190 14
51373 -246 190 14 246 190 14 246 190 14 246 190 14
51374 -246 190 14 246 190 14 246 190 14 246 190 14
51375 -246 190 14 246 190 14 241 196 14 226 184 13
51376 - 61 42 6 2 2 6 2 2 6 2 2 6
51377 - 22 22 22 238 238 238 253 253 253 253 253 253
51378 -253 253 253 253 253 253 253 253 253 253 253 253
51379 -253 253 253 253 253 253 253 253 253 253 253 253
51380 -253 253 253 253 253 253 253 253 253 253 253 253
51381 -253 253 253 226 226 226 187 187 187 180 133 36
51382 -216 158 10 236 178 12 239 182 13 236 178 12
51383 -230 174 11 226 170 11 226 170 11 230 174 11
51384 -236 178 12 242 186 14 246 190 14 246 190 14
51385 -246 190 14 246 190 14 246 186 14 239 182 13
51386 -206 162 42 106 106 106 66 66 66 34 34 34
51387 - 14 14 14 6 6 6 0 0 0 0 0 0
51388 - 0 0 0 0 0 0 0 0 0 0 0 0
51389 - 0 0 0 0 0 0 0 0 0 0 0 0
51390 - 0 0 0 0 0 0 0 0 0 6 6 6
51391 - 26 26 26 70 70 70 163 133 67 213 154 11
51392 -236 178 12 246 190 14 246 190 14 246 190 14
51393 -246 190 14 246 190 14 246 190 14 246 190 14
51394 -246 190 14 246 190 14 246 190 14 246 190 14
51395 -246 190 14 246 190 14 246 190 14 241 196 14
51396 -190 146 13 18 14 6 2 2 6 2 2 6
51397 - 46 46 46 246 246 246 253 253 253 253 253 253
51398 -253 253 253 253 253 253 253 253 253 253 253 253
51399 -253 253 253 253 253 253 253 253 253 253 253 253
51400 -253 253 253 253 253 253 253 253 253 253 253 253
51401 -253 253 253 221 221 221 86 86 86 156 107 11
51402 -216 158 10 236 178 12 242 186 14 246 186 14
51403 -242 186 14 239 182 13 239 182 13 242 186 14
51404 -242 186 14 246 186 14 246 190 14 246 190 14
51405 -246 190 14 246 190 14 246 190 14 246 190 14
51406 -242 186 14 225 175 15 142 122 72 66 66 66
51407 - 30 30 30 10 10 10 0 0 0 0 0 0
51408 - 0 0 0 0 0 0 0 0 0 0 0 0
51409 - 0 0 0 0 0 0 0 0 0 0 0 0
51410 - 0 0 0 0 0 0 0 0 0 6 6 6
51411 - 26 26 26 70 70 70 163 133 67 210 150 10
51412 -236 178 12 246 190 14 246 190 14 246 190 14
51413 -246 190 14 246 190 14 246 190 14 246 190 14
51414 -246 190 14 246 190 14 246 190 14 246 190 14
51415 -246 190 14 246 190 14 246 190 14 246 190 14
51416 -232 195 16 121 92 8 34 34 34 106 106 106
51417 -221 221 221 253 253 253 253 253 253 253 253 253
51418 -253 253 253 253 253 253 253 253 253 253 253 253
51419 -253 253 253 253 253 253 253 253 253 253 253 253
51420 -253 253 253 253 253 253 253 253 253 253 253 253
51421 -242 242 242 82 82 82 18 14 6 163 110 8
51422 -216 158 10 236 178 12 242 186 14 246 190 14
51423 -246 190 14 246 190 14 246 190 14 246 190 14
51424 -246 190 14 246 190 14 246 190 14 246 190 14
51425 -246 190 14 246 190 14 246 190 14 246 190 14
51426 -246 190 14 246 190 14 242 186 14 163 133 67
51427 - 46 46 46 18 18 18 6 6 6 0 0 0
51428 - 0 0 0 0 0 0 0 0 0 0 0 0
51429 - 0 0 0 0 0 0 0 0 0 0 0 0
51430 - 0 0 0 0 0 0 0 0 0 10 10 10
51431 - 30 30 30 78 78 78 163 133 67 210 150 10
51432 -236 178 12 246 186 14 246 190 14 246 190 14
51433 -246 190 14 246 190 14 246 190 14 246 190 14
51434 -246 190 14 246 190 14 246 190 14 246 190 14
51435 -246 190 14 246 190 14 246 190 14 246 190 14
51436 -241 196 14 215 174 15 190 178 144 253 253 253
51437 -253 253 253 253 253 253 253 253 253 253 253 253
51438 -253 253 253 253 253 253 253 253 253 253 253 253
51439 -253 253 253 253 253 253 253 253 253 253 253 253
51440 -253 253 253 253 253 253 253 253 253 218 218 218
51441 - 58 58 58 2 2 6 22 18 6 167 114 7
51442 -216 158 10 236 178 12 246 186 14 246 190 14
51443 -246 190 14 246 190 14 246 190 14 246 190 14
51444 -246 190 14 246 190 14 246 190 14 246 190 14
51445 -246 190 14 246 190 14 246 190 14 246 190 14
51446 -246 190 14 246 186 14 242 186 14 190 150 46
51447 - 54 54 54 22 22 22 6 6 6 0 0 0
51448 - 0 0 0 0 0 0 0 0 0 0 0 0
51449 - 0 0 0 0 0 0 0 0 0 0 0 0
51450 - 0 0 0 0 0 0 0 0 0 14 14 14
51451 - 38 38 38 86 86 86 180 133 36 213 154 11
51452 -236 178 12 246 186 14 246 190 14 246 190 14
51453 -246 190 14 246 190 14 246 190 14 246 190 14
51454 -246 190 14 246 190 14 246 190 14 246 190 14
51455 -246 190 14 246 190 14 246 190 14 246 190 14
51456 -246 190 14 232 195 16 190 146 13 214 214 214
51457 -253 253 253 253 253 253 253 253 253 253 253 253
51458 -253 253 253 253 253 253 253 253 253 253 253 253
51459 -253 253 253 253 253 253 253 253 253 253 253 253
51460 -253 253 253 250 250 250 170 170 170 26 26 26
51461 - 2 2 6 2 2 6 37 26 9 163 110 8
51462 -219 162 10 239 182 13 246 186 14 246 190 14
51463 -246 190 14 246 190 14 246 190 14 246 190 14
51464 -246 190 14 246 190 14 246 190 14 246 190 14
51465 -246 190 14 246 190 14 246 190 14 246 190 14
51466 -246 186 14 236 178 12 224 166 10 142 122 72
51467 - 46 46 46 18 18 18 6 6 6 0 0 0
51468 - 0 0 0 0 0 0 0 0 0 0 0 0
51469 - 0 0 0 0 0 0 0 0 0 0 0 0
51470 - 0 0 0 0 0 0 6 6 6 18 18 18
51471 - 50 50 50 109 106 95 192 133 9 224 166 10
51472 -242 186 14 246 190 14 246 190 14 246 190 14
51473 -246 190 14 246 190 14 246 190 14 246 190 14
51474 -246 190 14 246 190 14 246 190 14 246 190 14
51475 -246 190 14 246 190 14 246 190 14 246 190 14
51476 -242 186 14 226 184 13 210 162 10 142 110 46
51477 -226 226 226 253 253 253 253 253 253 253 253 253
51478 -253 253 253 253 253 253 253 253 253 253 253 253
51479 -253 253 253 253 253 253 253 253 253 253 253 253
51480 -198 198 198 66 66 66 2 2 6 2 2 6
51481 - 2 2 6 2 2 6 50 34 6 156 107 11
51482 -219 162 10 239 182 13 246 186 14 246 190 14
51483 -246 190 14 246 190 14 246 190 14 246 190 14
51484 -246 190 14 246 190 14 246 190 14 246 190 14
51485 -246 190 14 246 190 14 246 190 14 242 186 14
51486 -234 174 13 213 154 11 154 122 46 66 66 66
51487 - 30 30 30 10 10 10 0 0 0 0 0 0
51488 - 0 0 0 0 0 0 0 0 0 0 0 0
51489 - 0 0 0 0 0 0 0 0 0 0 0 0
51490 - 0 0 0 0 0 0 6 6 6 22 22 22
51491 - 58 58 58 154 121 60 206 145 10 234 174 13
51492 -242 186 14 246 186 14 246 190 14 246 190 14
51493 -246 190 14 246 190 14 246 190 14 246 190 14
51494 -246 190 14 246 190 14 246 190 14 246 190 14
51495 -246 190 14 246 190 14 246 190 14 246 190 14
51496 -246 186 14 236 178 12 210 162 10 163 110 8
51497 - 61 42 6 138 138 138 218 218 218 250 250 250
51498 -253 253 253 253 253 253 253 253 253 250 250 250
51499 -242 242 242 210 210 210 144 144 144 66 66 66
51500 - 6 6 6 2 2 6 2 2 6 2 2 6
51501 - 2 2 6 2 2 6 61 42 6 163 110 8
51502 -216 158 10 236 178 12 246 190 14 246 190 14
51503 -246 190 14 246 190 14 246 190 14 246 190 14
51504 -246 190 14 246 190 14 246 190 14 246 190 14
51505 -246 190 14 239 182 13 230 174 11 216 158 10
51506 -190 142 34 124 112 88 70 70 70 38 38 38
51507 - 18 18 18 6 6 6 0 0 0 0 0 0
51508 - 0 0 0 0 0 0 0 0 0 0 0 0
51509 - 0 0 0 0 0 0 0 0 0 0 0 0
51510 - 0 0 0 0 0 0 6 6 6 22 22 22
51511 - 62 62 62 168 124 44 206 145 10 224 166 10
51512 -236 178 12 239 182 13 242 186 14 242 186 14
51513 -246 186 14 246 190 14 246 190 14 246 190 14
51514 -246 190 14 246 190 14 246 190 14 246 190 14
51515 -246 190 14 246 190 14 246 190 14 246 190 14
51516 -246 190 14 236 178 12 216 158 10 175 118 6
51517 - 80 54 7 2 2 6 6 6 6 30 30 30
51518 - 54 54 54 62 62 62 50 50 50 38 38 38
51519 - 14 14 14 2 2 6 2 2 6 2 2 6
51520 - 2 2 6 2 2 6 2 2 6 2 2 6
51521 - 2 2 6 6 6 6 80 54 7 167 114 7
51522 -213 154 11 236 178 12 246 190 14 246 190 14
51523 -246 190 14 246 190 14 246 190 14 246 190 14
51524 -246 190 14 242 186 14 239 182 13 239 182 13
51525 -230 174 11 210 150 10 174 135 50 124 112 88
51526 - 82 82 82 54 54 54 34 34 34 18 18 18
51527 - 6 6 6 0 0 0 0 0 0 0 0 0
51528 - 0 0 0 0 0 0 0 0 0 0 0 0
51529 - 0 0 0 0 0 0 0 0 0 0 0 0
51530 - 0 0 0 0 0 0 6 6 6 18 18 18
51531 - 50 50 50 158 118 36 192 133 9 200 144 11
51532 -216 158 10 219 162 10 224 166 10 226 170 11
51533 -230 174 11 236 178 12 239 182 13 239 182 13
51534 -242 186 14 246 186 14 246 190 14 246 190 14
51535 -246 190 14 246 190 14 246 190 14 246 190 14
51536 -246 186 14 230 174 11 210 150 10 163 110 8
51537 -104 69 6 10 10 10 2 2 6 2 2 6
51538 - 2 2 6 2 2 6 2 2 6 2 2 6
51539 - 2 2 6 2 2 6 2 2 6 2 2 6
51540 - 2 2 6 2 2 6 2 2 6 2 2 6
51541 - 2 2 6 6 6 6 91 60 6 167 114 7
51542 -206 145 10 230 174 11 242 186 14 246 190 14
51543 -246 190 14 246 190 14 246 186 14 242 186 14
51544 -239 182 13 230 174 11 224 166 10 213 154 11
51545 -180 133 36 124 112 88 86 86 86 58 58 58
51546 - 38 38 38 22 22 22 10 10 10 6 6 6
51547 - 0 0 0 0 0 0 0 0 0 0 0 0
51548 - 0 0 0 0 0 0 0 0 0 0 0 0
51549 - 0 0 0 0 0 0 0 0 0 0 0 0
51550 - 0 0 0 0 0 0 0 0 0 14 14 14
51551 - 34 34 34 70 70 70 138 110 50 158 118 36
51552 -167 114 7 180 123 7 192 133 9 197 138 11
51553 -200 144 11 206 145 10 213 154 11 219 162 10
51554 -224 166 10 230 174 11 239 182 13 242 186 14
51555 -246 186 14 246 186 14 246 186 14 246 186 14
51556 -239 182 13 216 158 10 185 133 11 152 99 6
51557 -104 69 6 18 14 6 2 2 6 2 2 6
51558 - 2 2 6 2 2 6 2 2 6 2 2 6
51559 - 2 2 6 2 2 6 2 2 6 2 2 6
51560 - 2 2 6 2 2 6 2 2 6 2 2 6
51561 - 2 2 6 6 6 6 80 54 7 152 99 6
51562 -192 133 9 219 162 10 236 178 12 239 182 13
51563 -246 186 14 242 186 14 239 182 13 236 178 12
51564 -224 166 10 206 145 10 192 133 9 154 121 60
51565 - 94 94 94 62 62 62 42 42 42 22 22 22
51566 - 14 14 14 6 6 6 0 0 0 0 0 0
51567 - 0 0 0 0 0 0 0 0 0 0 0 0
51568 - 0 0 0 0 0 0 0 0 0 0 0 0
51569 - 0 0 0 0 0 0 0 0 0 0 0 0
51570 - 0 0 0 0 0 0 0 0 0 6 6 6
51571 - 18 18 18 34 34 34 58 58 58 78 78 78
51572 -101 98 89 124 112 88 142 110 46 156 107 11
51573 -163 110 8 167 114 7 175 118 6 180 123 7
51574 -185 133 11 197 138 11 210 150 10 219 162 10
51575 -226 170 11 236 178 12 236 178 12 234 174 13
51576 -219 162 10 197 138 11 163 110 8 130 83 6
51577 - 91 60 6 10 10 10 2 2 6 2 2 6
51578 - 18 18 18 38 38 38 38 38 38 38 38 38
51579 - 38 38 38 38 38 38 38 38 38 38 38 38
51580 - 38 38 38 38 38 38 26 26 26 2 2 6
51581 - 2 2 6 6 6 6 70 47 6 137 92 6
51582 -175 118 6 200 144 11 219 162 10 230 174 11
51583 -234 174 13 230 174 11 219 162 10 210 150 10
51584 -192 133 9 163 110 8 124 112 88 82 82 82
51585 - 50 50 50 30 30 30 14 14 14 6 6 6
51586 - 0 0 0 0 0 0 0 0 0 0 0 0
51587 - 0 0 0 0 0 0 0 0 0 0 0 0
51588 - 0 0 0 0 0 0 0 0 0 0 0 0
51589 - 0 0 0 0 0 0 0 0 0 0 0 0
51590 - 0 0 0 0 0 0 0 0 0 0 0 0
51591 - 6 6 6 14 14 14 22 22 22 34 34 34
51592 - 42 42 42 58 58 58 74 74 74 86 86 86
51593 -101 98 89 122 102 70 130 98 46 121 87 25
51594 -137 92 6 152 99 6 163 110 8 180 123 7
51595 -185 133 11 197 138 11 206 145 10 200 144 11
51596 -180 123 7 156 107 11 130 83 6 104 69 6
51597 - 50 34 6 54 54 54 110 110 110 101 98 89
51598 - 86 86 86 82 82 82 78 78 78 78 78 78
51599 - 78 78 78 78 78 78 78 78 78 78 78 78
51600 - 78 78 78 82 82 82 86 86 86 94 94 94
51601 -106 106 106 101 101 101 86 66 34 124 80 6
51602 -156 107 11 180 123 7 192 133 9 200 144 11
51603 -206 145 10 200 144 11 192 133 9 175 118 6
51604 -139 102 15 109 106 95 70 70 70 42 42 42
51605 - 22 22 22 10 10 10 0 0 0 0 0 0
51606 - 0 0 0 0 0 0 0 0 0 0 0 0
51607 - 0 0 0 0 0 0 0 0 0 0 0 0
51608 - 0 0 0 0 0 0 0 0 0 0 0 0
51609 - 0 0 0 0 0 0 0 0 0 0 0 0
51610 - 0 0 0 0 0 0 0 0 0 0 0 0
51611 - 0 0 0 0 0 0 6 6 6 10 10 10
51612 - 14 14 14 22 22 22 30 30 30 38 38 38
51613 - 50 50 50 62 62 62 74 74 74 90 90 90
51614 -101 98 89 112 100 78 121 87 25 124 80 6
51615 -137 92 6 152 99 6 152 99 6 152 99 6
51616 -138 86 6 124 80 6 98 70 6 86 66 30
51617 -101 98 89 82 82 82 58 58 58 46 46 46
51618 - 38 38 38 34 34 34 34 34 34 34 34 34
51619 - 34 34 34 34 34 34 34 34 34 34 34 34
51620 - 34 34 34 34 34 34 38 38 38 42 42 42
51621 - 54 54 54 82 82 82 94 86 76 91 60 6
51622 -134 86 6 156 107 11 167 114 7 175 118 6
51623 -175 118 6 167 114 7 152 99 6 121 87 25
51624 -101 98 89 62 62 62 34 34 34 18 18 18
51625 - 6 6 6 0 0 0 0 0 0 0 0 0
51626 - 0 0 0 0 0 0 0 0 0 0 0 0
51627 - 0 0 0 0 0 0 0 0 0 0 0 0
51628 - 0 0 0 0 0 0 0 0 0 0 0 0
51629 - 0 0 0 0 0 0 0 0 0 0 0 0
51630 - 0 0 0 0 0 0 0 0 0 0 0 0
51631 - 0 0 0 0 0 0 0 0 0 0 0 0
51632 - 0 0 0 6 6 6 6 6 6 10 10 10
51633 - 18 18 18 22 22 22 30 30 30 42 42 42
51634 - 50 50 50 66 66 66 86 86 86 101 98 89
51635 -106 86 58 98 70 6 104 69 6 104 69 6
51636 -104 69 6 91 60 6 82 62 34 90 90 90
51637 - 62 62 62 38 38 38 22 22 22 14 14 14
51638 - 10 10 10 10 10 10 10 10 10 10 10 10
51639 - 10 10 10 10 10 10 6 6 6 10 10 10
51640 - 10 10 10 10 10 10 10 10 10 14 14 14
51641 - 22 22 22 42 42 42 70 70 70 89 81 66
51642 - 80 54 7 104 69 6 124 80 6 137 92 6
51643 -134 86 6 116 81 8 100 82 52 86 86 86
51644 - 58 58 58 30 30 30 14 14 14 6 6 6
51645 - 0 0 0 0 0 0 0 0 0 0 0 0
51646 - 0 0 0 0 0 0 0 0 0 0 0 0
51647 - 0 0 0 0 0 0 0 0 0 0 0 0
51648 - 0 0 0 0 0 0 0 0 0 0 0 0
51649 - 0 0 0 0 0 0 0 0 0 0 0 0
51650 - 0 0 0 0 0 0 0 0 0 0 0 0
51651 - 0 0 0 0 0 0 0 0 0 0 0 0
51652 - 0 0 0 0 0 0 0 0 0 0 0 0
51653 - 0 0 0 6 6 6 10 10 10 14 14 14
51654 - 18 18 18 26 26 26 38 38 38 54 54 54
51655 - 70 70 70 86 86 86 94 86 76 89 81 66
51656 - 89 81 66 86 86 86 74 74 74 50 50 50
51657 - 30 30 30 14 14 14 6 6 6 0 0 0
51658 - 0 0 0 0 0 0 0 0 0 0 0 0
51659 - 0 0 0 0 0 0 0 0 0 0 0 0
51660 - 0 0 0 0 0 0 0 0 0 0 0 0
51661 - 6 6 6 18 18 18 34 34 34 58 58 58
51662 - 82 82 82 89 81 66 89 81 66 89 81 66
51663 - 94 86 66 94 86 76 74 74 74 50 50 50
51664 - 26 26 26 14 14 14 6 6 6 0 0 0
51665 - 0 0 0 0 0 0 0 0 0 0 0 0
51666 - 0 0 0 0 0 0 0 0 0 0 0 0
51667 - 0 0 0 0 0 0 0 0 0 0 0 0
51668 - 0 0 0 0 0 0 0 0 0 0 0 0
51669 - 0 0 0 0 0 0 0 0 0 0 0 0
51670 - 0 0 0 0 0 0 0 0 0 0 0 0
51671 - 0 0 0 0 0 0 0 0 0 0 0 0
51672 - 0 0 0 0 0 0 0 0 0 0 0 0
51673 - 0 0 0 0 0 0 0 0 0 0 0 0
51674 - 6 6 6 6 6 6 14 14 14 18 18 18
51675 - 30 30 30 38 38 38 46 46 46 54 54 54
51676 - 50 50 50 42 42 42 30 30 30 18 18 18
51677 - 10 10 10 0 0 0 0 0 0 0 0 0
51678 - 0 0 0 0 0 0 0 0 0 0 0 0
51679 - 0 0 0 0 0 0 0 0 0 0 0 0
51680 - 0 0 0 0 0 0 0 0 0 0 0 0
51681 - 0 0 0 6 6 6 14 14 14 26 26 26
51682 - 38 38 38 50 50 50 58 58 58 58 58 58
51683 - 54 54 54 42 42 42 30 30 30 18 18 18
51684 - 10 10 10 0 0 0 0 0 0 0 0 0
51685 - 0 0 0 0 0 0 0 0 0 0 0 0
51686 - 0 0 0 0 0 0 0 0 0 0 0 0
51687 - 0 0 0 0 0 0 0 0 0 0 0 0
51688 - 0 0 0 0 0 0 0 0 0 0 0 0
51689 - 0 0 0 0 0 0 0 0 0 0 0 0
51690 - 0 0 0 0 0 0 0 0 0 0 0 0
51691 - 0 0 0 0 0 0 0 0 0 0 0 0
51692 - 0 0 0 0 0 0 0 0 0 0 0 0
51693 - 0 0 0 0 0 0 0 0 0 0 0 0
51694 - 0 0 0 0 0 0 0 0 0 6 6 6
51695 - 6 6 6 10 10 10 14 14 14 18 18 18
51696 - 18 18 18 14 14 14 10 10 10 6 6 6
51697 - 0 0 0 0 0 0 0 0 0 0 0 0
51698 - 0 0 0 0 0 0 0 0 0 0 0 0
51699 - 0 0 0 0 0 0 0 0 0 0 0 0
51700 - 0 0 0 0 0 0 0 0 0 0 0 0
51701 - 0 0 0 0 0 0 0 0 0 6 6 6
51702 - 14 14 14 18 18 18 22 22 22 22 22 22
51703 - 18 18 18 14 14 14 10 10 10 6 6 6
51704 - 0 0 0 0 0 0 0 0 0 0 0 0
51705 - 0 0 0 0 0 0 0 0 0 0 0 0
51706 - 0 0 0 0 0 0 0 0 0 0 0 0
51707 - 0 0 0 0 0 0 0 0 0 0 0 0
51708 - 0 0 0 0 0 0 0 0 0 0 0 0
51709 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51710 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51711 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51712 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51713 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51714 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51715 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51716 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51717 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51718 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51719 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51720 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51721 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51722 +4 4 4 4 4 4
51723 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51724 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51725 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51726 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51727 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51728 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51729 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51730 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51731 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51732 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51733 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51734 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51735 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51736 +4 4 4 4 4 4
51737 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51738 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51739 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51740 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51741 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51742 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51743 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51744 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51745 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51746 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51747 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51748 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51749 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51750 +4 4 4 4 4 4
51751 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51752 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51753 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51754 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51755 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51756 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51757 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51758 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51759 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51760 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51761 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51762 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51763 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51764 +4 4 4 4 4 4
51765 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51766 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51767 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51768 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51769 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51770 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51771 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51772 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51773 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51774 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51775 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51776 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51777 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51778 +4 4 4 4 4 4
51779 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51780 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51781 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51782 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51783 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51784 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51785 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51786 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51787 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51788 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51789 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51790 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51791 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51792 +4 4 4 4 4 4
51793 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51794 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51795 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51796 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51797 +4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
51798 +0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
51799 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51800 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51801 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51802 +4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
51803 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
51804 +4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
51805 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51806 +4 4 4 4 4 4
51807 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51808 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51809 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51810 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51811 +4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
51812 +37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
51813 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51814 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51815 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51816 +4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
51817 +2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
51818 +4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
51819 +1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51820 +4 4 4 4 4 4
51821 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51822 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51823 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51824 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51825 +2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
51826 +153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
51827 +0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
51828 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51829 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51830 +4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
51831 +60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
51832 +4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
51833 +2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
51834 +4 4 4 4 4 4
51835 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51836 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51837 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51838 +4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
51839 +4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
51840 +165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
51841 +1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
51842 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51843 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
51844 +3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
51845 +163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
51846 +0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
51847 +37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
51848 +4 4 4 4 4 4
51849 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51850 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51851 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51852 +4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
51853 +37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
51854 +156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
51855 +125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
51856 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
51857 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
51858 +0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
51859 +174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
51860 +0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
51861 +64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
51862 +4 4 4 4 4 4
51863 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51864 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51865 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
51866 +5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
51867 +156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
51868 +156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
51869 +174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
51870 +1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
51871 +4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
51872 +13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
51873 +174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
51874 +22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
51875 +90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
51876 +4 4 4 4 4 4
51877 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51878 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51879 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
51880 +0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
51881 +174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
51882 +156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
51883 +163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
51884 +4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
51885 +5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
51886 +131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
51887 +190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
51888 +90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
51889 +31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
51890 +4 4 4 4 4 4
51891 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51892 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51893 +4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
51894 +4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
51895 +155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
51896 +167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
51897 +153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
51898 +41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
51899 +1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
51900 +177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
51901 +125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
51902 +136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
51903 +7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
51904 +4 4 4 4 4 4
51905 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51906 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51907 +4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
51908 +125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
51909 +156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
51910 +137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
51911 +156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
51912 +167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
51913 +0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
51914 +166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
51915 +6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
51916 +90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
51917 +1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
51918 +4 4 4 4 4 4
51919 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51920 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51921 +1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
51922 +167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
51923 +157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
51924 +26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
51925 +158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
51926 +165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
51927 +60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
51928 +137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
51929 +52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
51930 +13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
51931 +4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
51932 +4 4 4 4 4 4
51933 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51934 +4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
51935 +0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
51936 +158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
51937 +167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
51938 +4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
51939 +174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
51940 +155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
51941 +137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
51942 +16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
51943 +136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
51944 +2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
51945 +4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
51946 +4 4 4 4 4 4
51947 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
51948 +4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
51949 +37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
51950 +157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
51951 +153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
51952 +4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
51953 +125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
51954 +156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
51955 +174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
51956 +4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
51957 +136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
51958 +1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
51959 +2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
51960 +0 0 0 4 4 4
51961 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
51962 +4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
51963 +158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
51964 +153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
51965 +37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
51966 +4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
51967 +4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
51968 +154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
51969 +174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
51970 +32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
51971 +28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
51972 +50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
51973 +0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
51974 +2 0 0 0 0 0
51975 +4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
51976 +0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
51977 +174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
51978 +165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
51979 +4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
51980 +4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
51981 +4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
51982 +174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
51983 +60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
51984 +136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
51985 +22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
51986 +136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
51987 +26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
51988 +37 38 37 0 0 0
51989 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
51990 +13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
51991 +153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
51992 +177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
51993 +4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
51994 +5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
51995 +6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
51996 +166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
51997 +4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
51998 +146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
51999 +71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
52000 +90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
52001 +125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
52002 +85 115 134 4 0 0
52003 +4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
52004 +125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
52005 +155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
52006 +125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
52007 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
52008 +0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
52009 +5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
52010 +37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
52011 +4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
52012 +90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
52013 +2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
52014 +13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
52015 +166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
52016 +60 73 81 4 0 0
52017 +4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
52018 +174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
52019 +156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
52020 +4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
52021 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
52022 +10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
52023 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
52024 +4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
52025 +80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
52026 +28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
52027 +50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
52028 +1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
52029 +167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
52030 +16 19 21 4 0 0
52031 +4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
52032 +158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
52033 +167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
52034 +4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
52035 +4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
52036 +80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
52037 +4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
52038 +3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
52039 +146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
52040 +68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
52041 +136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
52042 +24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
52043 +163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
52044 +4 0 0 4 3 3
52045 +3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
52046 +156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
52047 +155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
52048 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
52049 +2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
52050 +136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
52051 +0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
52052 +0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
52053 +136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
52054 +28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
52055 +22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
52056 +137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
52057 +60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
52058 +3 2 2 4 4 4
52059 +3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
52060 +157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
52061 +37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
52062 +4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
52063 +0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
52064 +101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
52065 +14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
52066 +22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
52067 +136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
52068 +17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
52069 +2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
52070 +166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
52071 +13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
52072 +4 4 4 4 4 4
52073 +1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
52074 +163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
52075 +4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
52076 +4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
52077 +40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
52078 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
52079 +101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
52080 +136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
52081 +136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
52082 +136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
52083 +3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
52084 +174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
52085 +4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
52086 +4 4 4 4 4 4
52087 +4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
52088 +155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
52089 +4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
52090 +4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
52091 +101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
52092 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
52093 +136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
52094 +136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
52095 +136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
52096 +90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
52097 +85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
52098 +167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
52099 +6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
52100 +5 5 5 5 5 5
52101 +1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
52102 +131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
52103 +6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
52104 +0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
52105 +101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
52106 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
52107 +101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
52108 +136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
52109 +101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
52110 +7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
52111 +174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
52112 +24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
52113 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
52114 +5 5 5 4 4 4
52115 +4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
52116 +131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
52117 +6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
52118 +13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
52119 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
52120 +101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
52121 +101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
52122 +136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
52123 +136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
52124 +2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
52125 +174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
52126 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
52127 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52128 +4 4 4 4 4 4
52129 +1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
52130 +137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
52131 +4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
52132 +64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
52133 +90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
52134 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
52135 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
52136 +136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
52137 +101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
52138 +37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
52139 +167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
52140 +3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
52141 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52142 +4 4 4 4 4 4
52143 +4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
52144 +153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
52145 +4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
52146 +90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
52147 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
52148 +90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
52149 +101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
52150 +101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
52151 +35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
52152 +154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
52153 +60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
52154 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52155 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52156 +4 4 4 4 4 4
52157 +1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
52158 +153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
52159 +4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
52160 +64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
52161 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
52162 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
52163 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
52164 +136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
52165 +13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
52166 +174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
52167 +6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
52168 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52169 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52170 +4 4 4 4 4 4
52171 +4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
52172 +156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
52173 +4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
52174 +90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
52175 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
52176 +90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
52177 +101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
52178 +101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
52179 +2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
52180 +174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
52181 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52182 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52183 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52184 +4 4 4 4 4 4
52185 +3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
52186 +158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
52187 +4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
52188 +37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
52189 +90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
52190 +90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
52191 +101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
52192 +90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
52193 +5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
52194 +167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
52195 +6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
52196 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52197 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52198 +4 4 4 4 4 4
52199 +4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
52200 +163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
52201 +4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
52202 +18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
52203 +64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
52204 +90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
52205 +101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
52206 +13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
52207 +3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
52208 +174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
52209 +4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
52210 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52211 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52212 +4 4 4 4 4 4
52213 +1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
52214 +167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
52215 +4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
52216 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
52217 +26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
52218 +90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
52219 +101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
52220 +7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
52221 +4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
52222 +174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
52223 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52224 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52225 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52226 +4 4 4 4 4 4
52227 +4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
52228 +174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
52229 +5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
52230 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
52231 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
52232 +90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
52233 +101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
52234 +2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
52235 +3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
52236 +153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
52237 +4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52238 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52239 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52240 +4 4 4 4 4 4
52241 +1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
52242 +174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
52243 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
52244 +18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
52245 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
52246 +26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
52247 +35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
52248 +2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
52249 +3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
52250 +131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
52251 +4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52252 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52253 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52254 +4 4 4 4 4 4
52255 +3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
52256 +174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
52257 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
52258 +18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
52259 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
52260 +26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
52261 +7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
52262 +4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
52263 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
52264 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52265 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52266 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52267 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52268 +4 4 4 4 4 4
52269 +1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
52270 +174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
52271 +5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
52272 +18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
52273 +18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
52274 +26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
52275 +28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
52276 +3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
52277 +4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
52278 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52279 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52280 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52281 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52282 +4 4 4 4 4 4
52283 +4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
52284 +174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
52285 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
52286 +10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
52287 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
52288 +18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
52289 +90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
52290 +3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
52291 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
52292 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52293 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52294 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52295 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52296 +4 4 4 4 4 4
52297 +1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
52298 +177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
52299 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
52300 +10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
52301 +26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
52302 +6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
52303 +10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
52304 +2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
52305 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
52306 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52307 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52308 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52309 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52310 +4 4 4 4 4 4
52311 +4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
52312 +177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
52313 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
52314 +10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
52315 +26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
52316 +7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
52317 +3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
52318 +21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
52319 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
52320 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52321 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52322 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52323 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52324 +4 4 4 4 4 4
52325 +3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
52326 +190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
52327 +5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
52328 +10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
52329 +24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
52330 +18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
52331 +28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
52332 +26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
52333 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
52334 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52335 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52336 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52337 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52338 +4 4 4 4 4 4
52339 +4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
52340 +190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
52341 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
52342 +10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
52343 +0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
52344 +26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
52345 +37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
52346 +90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
52347 +4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
52348 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52349 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52350 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52351 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52352 +4 4 4 4 4 4
52353 +4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
52354 +193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
52355 +5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
52356 +10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
52357 +1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
52358 +26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
52359 +22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
52360 +26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
52361 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
52362 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52363 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52364 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52365 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52366 +4 4 4 4 4 4
52367 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
52368 +190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
52369 +5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
52370 +10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
52371 +2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
52372 +26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
52373 +10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
52374 +26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
52375 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
52376 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52377 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52378 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52379 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52380 +4 4 4 4 4 4
52381 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
52382 +193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
52383 +5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
52384 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
52385 +13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
52386 +10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
52387 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
52388 +26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
52389 +4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
52390 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52391 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52392 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52393 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52394 +4 4 4 4 4 4
52395 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
52396 +190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
52397 +5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
52398 +28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
52399 +10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
52400 +28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
52401 +26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
52402 +26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
52403 +4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
52404 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52405 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52406 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52407 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52408 +4 4 4 4 4 4
52409 +4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
52410 +193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
52411 +5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
52412 +4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
52413 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
52414 +10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
52415 +18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
52416 +22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
52417 +4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
52418 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52419 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52420 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52421 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52422 +4 4 4 4 4 4
52423 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
52424 +190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
52425 +6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
52426 +1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
52427 +18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
52428 +10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
52429 +26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
52430 +1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
52431 +5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
52432 +137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52433 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52434 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52435 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52436 +4 4 4 4 4 4
52437 +4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
52438 +193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
52439 +2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
52440 +4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
52441 +10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
52442 +10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
52443 +26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
52444 +2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
52445 +3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
52446 +131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52447 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52448 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52449 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52450 +4 4 4 4 4 4
52451 +4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
52452 +193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
52453 +0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
52454 +4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
52455 +13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
52456 +10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
52457 +28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
52458 +4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
52459 +0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
52460 +125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
52461 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52462 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52463 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52464 +4 4 4 4 4 4
52465 +4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
52466 +193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
52467 +120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
52468 +4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
52469 +4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
52470 +10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
52471 +4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
52472 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
52473 +24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
52474 +125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
52475 +0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52476 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52477 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52478 +4 4 4 4 4 4
52479 +4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
52480 +174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
52481 +220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
52482 +3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
52483 +4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
52484 +10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
52485 +1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
52486 +5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
52487 +137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
52488 +125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
52489 +0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52490 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52491 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52492 +4 4 4 4 4 4
52493 +5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
52494 +193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
52495 +220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
52496 +4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
52497 +4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
52498 +22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
52499 +4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52500 +1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
52501 +166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
52502 +125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
52503 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
52504 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52505 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52506 +4 4 4 4 4 4
52507 +4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
52508 +220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
52509 +205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
52510 +24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
52511 +4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
52512 +4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
52513 +4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
52514 +2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
52515 +156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
52516 +137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
52517 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52518 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52519 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52520 +4 4 4 4 4 4
52521 +5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
52522 +125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
52523 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
52524 +193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
52525 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
52526 +1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
52527 +5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
52528 +60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
52529 +153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
52530 +125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
52531 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52532 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52533 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52534 +4 4 4 4 4 4
52535 +4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
52536 +6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
52537 +193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
52538 +244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
52539 +0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
52540 +4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
52541 +3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
52542 +220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
52543 +153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
52544 +13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
52545 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52546 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52547 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52548 +4 4 4 4 4 4
52549 +5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
52550 +6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
52551 +244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
52552 +220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
52553 +3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
52554 +4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
52555 +0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
52556 +177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
52557 +158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
52558 +4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
52559 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52560 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52561 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52562 +4 4 4 4 4 4
52563 +5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
52564 +6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
52565 +177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
52566 +220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
52567 +125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
52568 +4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
52569 +37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
52570 +174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
52571 +158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
52572 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
52573 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52574 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52575 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52576 +4 4 4 4 4 4
52577 +4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
52578 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
52579 +26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
52580 +205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
52581 +244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
52582 +0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
52583 +177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
52584 +174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
52585 +60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
52586 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52587 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52588 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52589 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52590 +4 4 4 4 4 4
52591 +5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
52592 +6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
52593 +6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
52594 +220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
52595 +220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
52596 +0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
52597 +220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
52598 +174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
52599 +4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
52600 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52601 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52602 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52603 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52604 +4 4 4 4 4 4
52605 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
52606 +6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
52607 +4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
52608 +220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
52609 +205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
52610 +60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
52611 +177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
52612 +190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
52613 +4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52614 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52615 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52616 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52617 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52618 +4 4 4 4 4 4
52619 +4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
52620 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
52621 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
52622 +125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
52623 +205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
52624 +193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
52625 +190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
52626 +153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
52627 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52628 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52629 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52630 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52631 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52632 +4 4 4 4 4 4
52633 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
52634 +6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
52635 +4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
52636 +4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
52637 +205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
52638 +220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
52639 +174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
52640 +6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
52641 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52642 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52643 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52644 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52645 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52646 +4 4 4 4 4 4
52647 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
52648 +5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
52649 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
52650 +4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
52651 +220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
52652 +190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
52653 +193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
52654 +4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
52655 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52656 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52657 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52658 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52659 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52660 +4 4 4 4 4 4
52661 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52662 +4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
52663 +4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
52664 +6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
52665 +174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
52666 +193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
52667 +193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
52668 +6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
52669 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52670 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52671 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52672 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52673 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52674 +4 4 4 4 4 4
52675 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52676 +4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
52677 +5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
52678 +5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
52679 +6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
52680 +193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
52681 +60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
52682 +5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
52683 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52684 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52685 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52686 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52687 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52688 +4 4 4 4 4 4
52689 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52690 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52691 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
52692 +5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
52693 +4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
52694 +193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
52695 +6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
52696 +4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
52697 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52698 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52699 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52700 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52701 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52702 +4 4 4 4 4 4
52703 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52704 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52705 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
52706 +4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
52707 +6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
52708 +153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
52709 +6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
52710 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52711 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52712 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52713 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52714 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52715 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52716 +4 4 4 4 4 4
52717 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52718 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52719 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52720 +4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
52721 +6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
52722 +24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
52723 +6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
52724 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52725 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52726 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52727 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52728 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52729 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52730 +4 4 4 4 4 4
52731 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52732 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52733 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52734 +4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
52735 +4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
52736 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
52737 +4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
52738 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52739 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52740 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52741 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52742 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52743 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52744 +4 4 4 4 4 4
52745 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52746 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52747 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52748 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
52749 +5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
52750 +6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
52751 +6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
52752 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52753 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52754 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52755 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52756 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52757 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52758 +4 4 4 4 4 4
52759 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52760 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52761 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52762 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
52763 +4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
52764 +4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
52765 +6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
52766 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52767 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52768 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52769 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52770 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52771 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52772 +4 4 4 4 4 4
52773 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52774 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52775 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52776 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52777 +4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
52778 +6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
52779 +4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
52780 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52781 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52782 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52783 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52784 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52785 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52786 +4 4 4 4 4 4
52787 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52788 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52789 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52790 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52791 +4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
52792 +4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
52793 +5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52794 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52795 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52796 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52797 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52798 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52799 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52800 +4 4 4 4 4 4
52801 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52802 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52803 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52804 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52805 +4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
52806 +5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
52807 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52808 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52809 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52810 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52811 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52812 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52813 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52814 +4 4 4 4 4 4
52815 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52816 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52817 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52818 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52819 +4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
52820 +5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
52821 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52822 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52823 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52824 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52825 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52826 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52827 +4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
52828 +4 4 4 4 4 4
52829 diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
52830 index fe92eed..106e085 100644
52831 --- a/drivers/video/mb862xx/mb862xxfb_accel.c
52832 +++ b/drivers/video/mb862xx/mb862xxfb_accel.c
52833 @@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
52834 struct mb862xxfb_par *par = info->par;
52835
52836 if (info->var.bits_per_pixel == 32) {
52837 - info->fbops->fb_fillrect = cfb_fillrect;
52838 - info->fbops->fb_copyarea = cfb_copyarea;
52839 - info->fbops->fb_imageblit = cfb_imageblit;
52840 + pax_open_kernel();
52841 + *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
52842 + *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
52843 + *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
52844 + pax_close_kernel();
52845 } else {
52846 outreg(disp, GC_L0EM, 3);
52847 - info->fbops->fb_fillrect = mb86290fb_fillrect;
52848 - info->fbops->fb_copyarea = mb86290fb_copyarea;
52849 - info->fbops->fb_imageblit = mb86290fb_imageblit;
52850 + pax_open_kernel();
52851 + *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
52852 + *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
52853 + *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
52854 + pax_close_kernel();
52855 }
52856 outreg(draw, GDC_REG_DRAW_BASE, 0);
52857 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
52858 diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
52859 index ff22871..b129bed 100644
52860 --- a/drivers/video/nvidia/nvidia.c
52861 +++ b/drivers/video/nvidia/nvidia.c
52862 @@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
52863 info->fix.line_length = (info->var.xres_virtual *
52864 info->var.bits_per_pixel) >> 3;
52865 if (info->var.accel_flags) {
52866 - info->fbops->fb_imageblit = nvidiafb_imageblit;
52867 - info->fbops->fb_fillrect = nvidiafb_fillrect;
52868 - info->fbops->fb_copyarea = nvidiafb_copyarea;
52869 - info->fbops->fb_sync = nvidiafb_sync;
52870 + pax_open_kernel();
52871 + *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
52872 + *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
52873 + *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
52874 + *(void **)&info->fbops->fb_sync = nvidiafb_sync;
52875 + pax_close_kernel();
52876 info->pixmap.scan_align = 4;
52877 info->flags &= ~FBINFO_HWACCEL_DISABLED;
52878 info->flags |= FBINFO_READS_FAST;
52879 NVResetGraphics(info);
52880 } else {
52881 - info->fbops->fb_imageblit = cfb_imageblit;
52882 - info->fbops->fb_fillrect = cfb_fillrect;
52883 - info->fbops->fb_copyarea = cfb_copyarea;
52884 - info->fbops->fb_sync = NULL;
52885 + pax_open_kernel();
52886 + *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
52887 + *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
52888 + *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
52889 + *(void **)&info->fbops->fb_sync = NULL;
52890 + pax_close_kernel();
52891 info->pixmap.scan_align = 1;
52892 info->flags |= FBINFO_HWACCEL_DISABLED;
52893 info->flags &= ~FBINFO_READS_FAST;
52894 @@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
52895 info->pixmap.size = 8 * 1024;
52896 info->pixmap.flags = FB_PIXMAP_SYSTEM;
52897
52898 - if (!hwcur)
52899 - info->fbops->fb_cursor = NULL;
52900 + if (!hwcur) {
52901 + pax_open_kernel();
52902 + *(void **)&info->fbops->fb_cursor = NULL;
52903 + pax_close_kernel();
52904 + }
52905
52906 info->var.accel_flags = (!noaccel);
52907
52908 diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
52909 index fafe7c9..93197b9 100644
52910 --- a/drivers/video/omap2/dss/display.c
52911 +++ b/drivers/video/omap2/dss/display.c
52912 @@ -137,12 +137,14 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
52913 snprintf(dssdev->alias, sizeof(dssdev->alias),
52914 "display%d", disp_num_counter++);
52915
52916 + pax_open_kernel();
52917 if (drv && drv->get_resolution == NULL)
52918 - drv->get_resolution = omapdss_default_get_resolution;
52919 + *(void **)&drv->get_resolution = omapdss_default_get_resolution;
52920 if (drv && drv->get_recommended_bpp == NULL)
52921 - drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
52922 + *(void **)&drv->get_recommended_bpp = omapdss_default_get_recommended_bpp;
52923 if (drv && drv->get_timings == NULL)
52924 - drv->get_timings = omapdss_default_get_timings;
52925 + *(void **)&drv->get_timings = omapdss_default_get_timings;
52926 + pax_close_kernel();
52927
52928 mutex_lock(&panel_list_mutex);
52929 list_add_tail(&dssdev->panel_list, &panel_list);
52930 diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
52931 index 05c2dc3..ea1f391 100644
52932 --- a/drivers/video/s1d13xxxfb.c
52933 +++ b/drivers/video/s1d13xxxfb.c
52934 @@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
52935
52936 switch(prod_id) {
52937 case S1D13506_PROD_ID: /* activate acceleration */
52938 - s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
52939 - s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
52940 + pax_open_kernel();
52941 + *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
52942 + *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
52943 + pax_close_kernel();
52944 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
52945 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
52946 break;
52947 diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
52948 index e188ada..aac63c8 100644
52949 --- a/drivers/video/smscufx.c
52950 +++ b/drivers/video/smscufx.c
52951 @@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
52952 fb_deferred_io_cleanup(info);
52953 kfree(info->fbdefio);
52954 info->fbdefio = NULL;
52955 - info->fbops->fb_mmap = ufx_ops_mmap;
52956 + pax_open_kernel();
52957 + *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
52958 + pax_close_kernel();
52959 }
52960
52961 pr_debug("released /dev/fb%d user=%d count=%d",
52962 diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
52963 index d2e5bc3..4cb05d1 100644
52964 --- a/drivers/video/udlfb.c
52965 +++ b/drivers/video/udlfb.c
52966 @@ -623,11 +623,11 @@ static int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
52967 dlfb_urb_completion(urb);
52968
52969 error:
52970 - atomic_add(bytes_sent, &dev->bytes_sent);
52971 - atomic_add(bytes_identical, &dev->bytes_identical);
52972 - atomic_add(width*height*2, &dev->bytes_rendered);
52973 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
52974 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
52975 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
52976 end_cycles = get_cycles();
52977 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
52978 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
52979 >> 10)), /* Kcycles */
52980 &dev->cpu_kcycles_used);
52981
52982 @@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
52983 dlfb_urb_completion(urb);
52984
52985 error:
52986 - atomic_add(bytes_sent, &dev->bytes_sent);
52987 - atomic_add(bytes_identical, &dev->bytes_identical);
52988 - atomic_add(bytes_rendered, &dev->bytes_rendered);
52989 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
52990 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
52991 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
52992 end_cycles = get_cycles();
52993 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
52994 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
52995 >> 10)), /* Kcycles */
52996 &dev->cpu_kcycles_used);
52997 }
52998 @@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
52999 fb_deferred_io_cleanup(info);
53000 kfree(info->fbdefio);
53001 info->fbdefio = NULL;
53002 - info->fbops->fb_mmap = dlfb_ops_mmap;
53003 + pax_open_kernel();
53004 + *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
53005 + pax_close_kernel();
53006 }
53007
53008 pr_warn("released /dev/fb%d user=%d count=%d\n",
53009 @@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
53010 struct fb_info *fb_info = dev_get_drvdata(fbdev);
53011 struct dlfb_data *dev = fb_info->par;
53012 return snprintf(buf, PAGE_SIZE, "%u\n",
53013 - atomic_read(&dev->bytes_rendered));
53014 + atomic_read_unchecked(&dev->bytes_rendered));
53015 }
53016
53017 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
53018 @@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
53019 struct fb_info *fb_info = dev_get_drvdata(fbdev);
53020 struct dlfb_data *dev = fb_info->par;
53021 return snprintf(buf, PAGE_SIZE, "%u\n",
53022 - atomic_read(&dev->bytes_identical));
53023 + atomic_read_unchecked(&dev->bytes_identical));
53024 }
53025
53026 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
53027 @@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
53028 struct fb_info *fb_info = dev_get_drvdata(fbdev);
53029 struct dlfb_data *dev = fb_info->par;
53030 return snprintf(buf, PAGE_SIZE, "%u\n",
53031 - atomic_read(&dev->bytes_sent));
53032 + atomic_read_unchecked(&dev->bytes_sent));
53033 }
53034
53035 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
53036 @@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
53037 struct fb_info *fb_info = dev_get_drvdata(fbdev);
53038 struct dlfb_data *dev = fb_info->par;
53039 return snprintf(buf, PAGE_SIZE, "%u\n",
53040 - atomic_read(&dev->cpu_kcycles_used));
53041 + atomic_read_unchecked(&dev->cpu_kcycles_used));
53042 }
53043
53044 static ssize_t edid_show(
53045 @@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
53046 struct fb_info *fb_info = dev_get_drvdata(fbdev);
53047 struct dlfb_data *dev = fb_info->par;
53048
53049 - atomic_set(&dev->bytes_rendered, 0);
53050 - atomic_set(&dev->bytes_identical, 0);
53051 - atomic_set(&dev->bytes_sent, 0);
53052 - atomic_set(&dev->cpu_kcycles_used, 0);
53053 + atomic_set_unchecked(&dev->bytes_rendered, 0);
53054 + atomic_set_unchecked(&dev->bytes_identical, 0);
53055 + atomic_set_unchecked(&dev->bytes_sent, 0);
53056 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
53057
53058 return count;
53059 }
53060 diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
53061 index 7aec6f3..e3b2d55 100644
53062 --- a/drivers/video/uvesafb.c
53063 +++ b/drivers/video/uvesafb.c
53064 @@ -19,6 +19,7 @@
53065 #include <linux/io.h>
53066 #include <linux/mutex.h>
53067 #include <linux/slab.h>
53068 +#include <linux/moduleloader.h>
53069 #include <video/edid.h>
53070 #include <video/uvesafb.h>
53071 #ifdef CONFIG_X86
53072 @@ -566,10 +567,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
53073 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
53074 par->pmi_setpal = par->ypan = 0;
53075 } else {
53076 +
53077 +#ifdef CONFIG_PAX_KERNEXEC
53078 +#ifdef CONFIG_MODULES
53079 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
53080 +#endif
53081 + if (!par->pmi_code) {
53082 + par->pmi_setpal = par->ypan = 0;
53083 + return 0;
53084 + }
53085 +#endif
53086 +
53087 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
53088 + task->t.regs.edi);
53089 +
53090 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
53091 + pax_open_kernel();
53092 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
53093 + pax_close_kernel();
53094 +
53095 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
53096 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
53097 +#else
53098 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
53099 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
53100 +#endif
53101 +
53102 printk(KERN_INFO "uvesafb: protected mode interface info at "
53103 "%04x:%04x\n",
53104 (u16)task->t.regs.es, (u16)task->t.regs.edi);
53105 @@ -814,13 +837,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
53106 par->ypan = ypan;
53107
53108 if (par->pmi_setpal || par->ypan) {
53109 +#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
53110 if (__supported_pte_mask & _PAGE_NX) {
53111 par->pmi_setpal = par->ypan = 0;
53112 printk(KERN_WARNING "uvesafb: NX protection is active, "
53113 "better not use the PMI.\n");
53114 - } else {
53115 + } else
53116 +#endif
53117 uvesafb_vbe_getpmi(task, par);
53118 - }
53119 }
53120 #else
53121 /* The protected mode interface is not available on non-x86. */
53122 @@ -1454,8 +1478,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
53123 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
53124
53125 /* Disable blanking if the user requested so. */
53126 - if (!blank)
53127 - info->fbops->fb_blank = NULL;
53128 + if (!blank) {
53129 + pax_open_kernel();
53130 + *(void **)&info->fbops->fb_blank = NULL;
53131 + pax_close_kernel();
53132 + }
53133
53134 /*
53135 * Find out how much IO memory is required for the mode with
53136 @@ -1531,8 +1558,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
53137 info->flags = FBINFO_FLAG_DEFAULT |
53138 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
53139
53140 - if (!par->ypan)
53141 - info->fbops->fb_pan_display = NULL;
53142 + if (!par->ypan) {
53143 + pax_open_kernel();
53144 + *(void **)&info->fbops->fb_pan_display = NULL;
53145 + pax_close_kernel();
53146 + }
53147 }
53148
53149 static void uvesafb_init_mtrr(struct fb_info *info)
53150 @@ -1796,6 +1826,11 @@ out:
53151 if (par->vbe_modes)
53152 kfree(par->vbe_modes);
53153
53154 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
53155 + if (par->pmi_code)
53156 + module_free_exec(NULL, par->pmi_code);
53157 +#endif
53158 +
53159 framebuffer_release(info);
53160 return err;
53161 }
53162 @@ -1823,6 +1858,12 @@ static int uvesafb_remove(struct platform_device *dev)
53163 kfree(par->vbe_state_orig);
53164 if (par->vbe_state_saved)
53165 kfree(par->vbe_state_saved);
53166 +
53167 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
53168 + if (par->pmi_code)
53169 + module_free_exec(NULL, par->pmi_code);
53170 +#endif
53171 +
53172 }
53173
53174 framebuffer_release(info);
53175 diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
53176 index bd83233..7d8a5aa 100644
53177 --- a/drivers/video/vesafb.c
53178 +++ b/drivers/video/vesafb.c
53179 @@ -9,6 +9,7 @@
53180 */
53181
53182 #include <linux/module.h>
53183 +#include <linux/moduleloader.h>
53184 #include <linux/kernel.h>
53185 #include <linux/errno.h>
53186 #include <linux/string.h>
53187 @@ -52,8 +53,8 @@ static int vram_remap; /* Set amount of memory to be used */
53188 static int vram_total; /* Set total amount of memory */
53189 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
53190 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
53191 -static void (*pmi_start)(void) __read_mostly;
53192 -static void (*pmi_pal) (void) __read_mostly;
53193 +static void (*pmi_start)(void) __read_only;
53194 +static void (*pmi_pal) (void) __read_only;
53195 static int depth __read_mostly;
53196 static int vga_compat __read_mostly;
53197 /* --------------------------------------------------------------------- */
53198 @@ -234,6 +235,7 @@ static int vesafb_probe(struct platform_device *dev)
53199 unsigned int size_remap;
53200 unsigned int size_total;
53201 char *option = NULL;
53202 + void *pmi_code = NULL;
53203
53204 /* ignore error return of fb_get_options */
53205 fb_get_options("vesafb", &option);
53206 @@ -280,10 +282,6 @@ static int vesafb_probe(struct platform_device *dev)
53207 size_remap = size_total;
53208 vesafb_fix.smem_len = size_remap;
53209
53210 -#ifndef __i386__
53211 - screen_info.vesapm_seg = 0;
53212 -#endif
53213 -
53214 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
53215 printk(KERN_WARNING
53216 "vesafb: cannot reserve video memory at 0x%lx\n",
53217 @@ -312,9 +310,21 @@ static int vesafb_probe(struct platform_device *dev)
53218 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
53219 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
53220
53221 +#ifdef __i386__
53222 +
53223 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
53224 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
53225 + if (!pmi_code)
53226 +#elif !defined(CONFIG_PAX_KERNEXEC)
53227 + if (0)
53228 +#endif
53229 +
53230 +#endif
53231 + screen_info.vesapm_seg = 0;
53232 +
53233 if (screen_info.vesapm_seg) {
53234 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
53235 - screen_info.vesapm_seg,screen_info.vesapm_off);
53236 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
53237 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
53238 }
53239
53240 if (screen_info.vesapm_seg < 0xc000)
53241 @@ -322,9 +332,25 @@ static int vesafb_probe(struct platform_device *dev)
53242
53243 if (ypan || pmi_setpal) {
53244 unsigned short *pmi_base;
53245 +
53246 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
53247 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
53248 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
53249 +
53250 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
53251 + pax_open_kernel();
53252 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
53253 +#else
53254 + pmi_code = pmi_base;
53255 +#endif
53256 +
53257 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
53258 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
53259 +
53260 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
53261 + pmi_start = ktva_ktla(pmi_start);
53262 + pmi_pal = ktva_ktla(pmi_pal);
53263 + pax_close_kernel();
53264 +#endif
53265 +
53266 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
53267 if (pmi_base[3]) {
53268 printk(KERN_INFO "vesafb: pmi: ports = ");
53269 @@ -477,8 +503,11 @@ static int vesafb_probe(struct platform_device *dev)
53270 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
53271 (ypan ? FBINFO_HWACCEL_YPAN : 0);
53272
53273 - if (!ypan)
53274 - info->fbops->fb_pan_display = NULL;
53275 + if (!ypan) {
53276 + pax_open_kernel();
53277 + *(void **)&info->fbops->fb_pan_display = NULL;
53278 + pax_close_kernel();
53279 + }
53280
53281 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
53282 err = -ENOMEM;
53283 @@ -493,6 +522,11 @@ static int vesafb_probe(struct platform_device *dev)
53284 info->node, info->fix.id);
53285 return 0;
53286 err:
53287 +
53288 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
53289 + module_free_exec(NULL, pmi_code);
53290 +#endif
53291 +
53292 if (info->screen_base)
53293 iounmap(info->screen_base);
53294 framebuffer_release(info);
53295 diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
53296 index 88714ae..16c2e11 100644
53297 --- a/drivers/video/via/via_clock.h
53298 +++ b/drivers/video/via/via_clock.h
53299 @@ -56,7 +56,7 @@ struct via_clock {
53300
53301 void (*set_engine_pll_state)(u8 state);
53302 void (*set_engine_pll)(struct via_pll_config config);
53303 -};
53304 +} __no_const;
53305
53306
53307 static inline u32 get_pll_internal_frequency(u32 ref_freq,
53308 diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
53309 index fef20db..d28b1ab 100644
53310 --- a/drivers/xen/xenfs/xenstored.c
53311 +++ b/drivers/xen/xenfs/xenstored.c
53312 @@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
53313 static int xsd_kva_open(struct inode *inode, struct file *file)
53314 {
53315 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
53316 +#ifdef CONFIG_GRKERNSEC_HIDESYM
53317 + NULL);
53318 +#else
53319 xen_store_interface);
53320 +#endif
53321 +
53322 if (!file->private_data)
53323 return -ENOMEM;
53324 return 0;
53325 diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
53326 index 9ff073f..05cef23 100644
53327 --- a/fs/9p/vfs_addr.c
53328 +++ b/fs/9p/vfs_addr.c
53329 @@ -187,7 +187,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
53330
53331 retval = v9fs_file_write_internal(inode,
53332 v9inode->writeback_fid,
53333 - (__force const char __user *)buffer,
53334 + (const char __force_user *)buffer,
53335 len, &offset, 0);
53336 if (retval > 0)
53337 retval = 0;
53338 diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
53339 index 94de6d1..8d81256 100644
53340 --- a/fs/9p/vfs_inode.c
53341 +++ b/fs/9p/vfs_inode.c
53342 @@ -1312,7 +1312,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
53343 void
53344 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
53345 {
53346 - char *s = nd_get_link(nd);
53347 + const char *s = nd_get_link(nd);
53348
53349 p9_debug(P9_DEBUG_VFS, " %s %s\n",
53350 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
53351 diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
53352 index 370b24c..ff0be7b 100644
53353 --- a/fs/Kconfig.binfmt
53354 +++ b/fs/Kconfig.binfmt
53355 @@ -103,7 +103,7 @@ config HAVE_AOUT
53356
53357 config BINFMT_AOUT
53358 tristate "Kernel support for a.out and ECOFF binaries"
53359 - depends on HAVE_AOUT
53360 + depends on HAVE_AOUT && BROKEN
53361 ---help---
53362 A.out (Assembler.OUTput) is a set of formats for libraries and
53363 executables used in the earliest versions of UNIX. Linux used
53364 diff --git a/fs/afs/inode.c b/fs/afs/inode.c
53365 index 789bc25..fafaeea 100644
53366 --- a/fs/afs/inode.c
53367 +++ b/fs/afs/inode.c
53368 @@ -141,7 +141,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
53369 struct afs_vnode *vnode;
53370 struct super_block *sb;
53371 struct inode *inode;
53372 - static atomic_t afs_autocell_ino;
53373 + static atomic_unchecked_t afs_autocell_ino;
53374
53375 _enter("{%x:%u},%*.*s,",
53376 AFS_FS_I(dir)->fid.vid, AFS_FS_I(dir)->fid.vnode,
53377 @@ -154,7 +154,7 @@ struct inode *afs_iget_autocell(struct inode *dir, const char *dev_name,
53378 data.fid.unique = 0;
53379 data.fid.vnode = 0;
53380
53381 - inode = iget5_locked(sb, atomic_inc_return(&afs_autocell_ino),
53382 + inode = iget5_locked(sb, atomic_inc_return_unchecked(&afs_autocell_ino),
53383 afs_iget5_autocell_test, afs_iget5_set,
53384 &data);
53385 if (!inode) {
53386 diff --git a/fs/aio.c b/fs/aio.c
53387 index 6efb7f6..ec354de 100644
53388 --- a/fs/aio.c
53389 +++ b/fs/aio.c
53390 @@ -338,7 +338,7 @@ static int aio_setup_ring(struct kioctx *ctx)
53391 size += sizeof(struct io_event) * nr_events;
53392
53393 nr_pages = PFN_UP(size);
53394 - if (nr_pages < 0)
53395 + if (nr_pages <= 0)
53396 return -EINVAL;
53397
53398 file = aio_private_file(ctx, nr_pages);
53399 @@ -652,7 +652,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
53400 aio_nr += ctx->max_reqs;
53401 spin_unlock(&aio_nr_lock);
53402
53403 - percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
53404 + percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
53405 + percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */
53406
53407 err = ioctx_add_table(ctx, mm);
53408 if (err)
53409 diff --git a/fs/attr.c b/fs/attr.c
53410 index 1449adb..a2038c2 100644
53411 --- a/fs/attr.c
53412 +++ b/fs/attr.c
53413 @@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
53414 unsigned long limit;
53415
53416 limit = rlimit(RLIMIT_FSIZE);
53417 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
53418 if (limit != RLIM_INFINITY && offset > limit)
53419 goto out_sig;
53420 if (offset > inode->i_sb->s_maxbytes)
53421 diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
53422 index 689e40d..515cac5 100644
53423 --- a/fs/autofs4/waitq.c
53424 +++ b/fs/autofs4/waitq.c
53425 @@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
53426 {
53427 unsigned long sigpipe, flags;
53428 mm_segment_t fs;
53429 - const char *data = (const char *)addr;
53430 + const char __user *data = (const char __force_user *)addr;
53431 ssize_t wr = 0;
53432
53433 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
53434 @@ -340,6 +340,10 @@ static int validate_request(struct autofs_wait_queue **wait,
53435 return 1;
53436 }
53437
53438 +#ifdef CONFIG_GRKERNSEC_HIDESYM
53439 +static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
53440 +#endif
53441 +
53442 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
53443 enum autofs_notify notify)
53444 {
53445 @@ -373,7 +377,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
53446
53447 /* If this is a direct mount request create a dummy name */
53448 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
53449 +#ifdef CONFIG_GRKERNSEC_HIDESYM
53450 + /* this name does get written to userland via autofs4_write() */
53451 + qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
53452 +#else
53453 qstr.len = sprintf(name, "%p", dentry);
53454 +#endif
53455 else {
53456 qstr.len = autofs4_getpath(sbi, dentry, &name);
53457 if (!qstr.len) {
53458 diff --git a/fs/befs/endian.h b/fs/befs/endian.h
53459 index 2722387..56059b5 100644
53460 --- a/fs/befs/endian.h
53461 +++ b/fs/befs/endian.h
53462 @@ -11,7 +11,7 @@
53463
53464 #include <asm/byteorder.h>
53465
53466 -static inline u64
53467 +static inline u64 __intentional_overflow(-1)
53468 fs64_to_cpu(const struct super_block *sb, fs64 n)
53469 {
53470 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
53471 @@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
53472 return (__force fs64)cpu_to_be64(n);
53473 }
53474
53475 -static inline u32
53476 +static inline u32 __intentional_overflow(-1)
53477 fs32_to_cpu(const struct super_block *sb, fs32 n)
53478 {
53479 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
53480 @@ -47,7 +47,7 @@ cpu_to_fs32(const struct super_block *sb, u32 n)
53481 return (__force fs32)cpu_to_be32(n);
53482 }
53483
53484 -static inline u16
53485 +static inline u16 __intentional_overflow(-1)
53486 fs16_to_cpu(const struct super_block *sb, fs16 n)
53487 {
53488 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
53489 diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
53490 index e9c75e2..1baece1 100644
53491 --- a/fs/befs/linuxvfs.c
53492 +++ b/fs/befs/linuxvfs.c
53493 @@ -514,7 +514,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
53494 {
53495 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
53496 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
53497 - char *link = nd_get_link(nd);
53498 + const char *link = nd_get_link(nd);
53499 if (!IS_ERR(link))
53500 kfree(link);
53501 }
53502 diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
53503 index 89dec7f..361b0d75 100644
53504 --- a/fs/binfmt_aout.c
53505 +++ b/fs/binfmt_aout.c
53506 @@ -16,6 +16,7 @@
53507 #include <linux/string.h>
53508 #include <linux/fs.h>
53509 #include <linux/file.h>
53510 +#include <linux/security.h>
53511 #include <linux/stat.h>
53512 #include <linux/fcntl.h>
53513 #include <linux/ptrace.h>
53514 @@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
53515 #endif
53516 # define START_STACK(u) ((void __user *)u.start_stack)
53517
53518 + memset(&dump, 0, sizeof(dump));
53519 +
53520 fs = get_fs();
53521 set_fs(KERNEL_DS);
53522 has_dumped = 1;
53523 @@ -69,10 +72,12 @@ static int aout_core_dump(struct coredump_params *cprm)
53524
53525 /* If the size of the dump file exceeds the rlimit, then see what would happen
53526 if we wrote the stack, but not the data area. */
53527 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
53528 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
53529 dump.u_dsize = 0;
53530
53531 /* Make sure we have enough room to write the stack and data areas. */
53532 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
53533 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
53534 dump.u_ssize = 0;
53535
53536 @@ -233,6 +238,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
53537 rlim = rlimit(RLIMIT_DATA);
53538 if (rlim >= RLIM_INFINITY)
53539 rlim = ~0;
53540 +
53541 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
53542 if (ex.a_data + ex.a_bss > rlim)
53543 return -ENOMEM;
53544
53545 @@ -265,6 +272,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
53546
53547 install_exec_creds(bprm);
53548
53549 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53550 + current->mm->pax_flags = 0UL;
53551 +#endif
53552 +
53553 +#ifdef CONFIG_PAX_PAGEEXEC
53554 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
53555 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
53556 +
53557 +#ifdef CONFIG_PAX_EMUTRAMP
53558 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
53559 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
53560 +#endif
53561 +
53562 +#ifdef CONFIG_PAX_MPROTECT
53563 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
53564 + current->mm->pax_flags |= MF_PAX_MPROTECT;
53565 +#endif
53566 +
53567 + }
53568 +#endif
53569 +
53570 if (N_MAGIC(ex) == OMAGIC) {
53571 unsigned long text_addr, map_size;
53572 loff_t pos;
53573 @@ -322,7 +350,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
53574 }
53575
53576 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
53577 - PROT_READ | PROT_WRITE | PROT_EXEC,
53578 + PROT_READ | PROT_WRITE,
53579 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
53580 fd_offset + ex.a_text);
53581 if (error != N_DATADDR(ex)) {
53582 diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
53583 index 4c94a79..228e9da 100644
53584 --- a/fs/binfmt_elf.c
53585 +++ b/fs/binfmt_elf.c
53586 @@ -34,6 +34,7 @@
53587 #include <linux/utsname.h>
53588 #include <linux/coredump.h>
53589 #include <linux/sched.h>
53590 +#include <linux/xattr.h>
53591 #include <asm/uaccess.h>
53592 #include <asm/param.h>
53593 #include <asm/page.h>
53594 @@ -60,6 +61,14 @@ static int elf_core_dump(struct coredump_params *cprm);
53595 #define elf_core_dump NULL
53596 #endif
53597
53598 +#ifdef CONFIG_PAX_MPROTECT
53599 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
53600 +#endif
53601 +
53602 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53603 +static void elf_handle_mmap(struct file *file);
53604 +#endif
53605 +
53606 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
53607 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
53608 #else
53609 @@ -79,6 +88,15 @@ static struct linux_binfmt elf_format = {
53610 .load_binary = load_elf_binary,
53611 .load_shlib = load_elf_library,
53612 .core_dump = elf_core_dump,
53613 +
53614 +#ifdef CONFIG_PAX_MPROTECT
53615 + .handle_mprotect= elf_handle_mprotect,
53616 +#endif
53617 +
53618 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
53619 + .handle_mmap = elf_handle_mmap,
53620 +#endif
53621 +
53622 .min_coredump = ELF_EXEC_PAGESIZE,
53623 };
53624
53625 @@ -86,6 +104,8 @@ static struct linux_binfmt elf_format = {
53626
53627 static int set_brk(unsigned long start, unsigned long end)
53628 {
53629 + unsigned long e = end;
53630 +
53631 start = ELF_PAGEALIGN(start);
53632 end = ELF_PAGEALIGN(end);
53633 if (end > start) {
53634 @@ -94,7 +114,7 @@ static int set_brk(unsigned long start, unsigned long end)
53635 if (BAD_ADDR(addr))
53636 return addr;
53637 }
53638 - current->mm->start_brk = current->mm->brk = end;
53639 + current->mm->start_brk = current->mm->brk = e;
53640 return 0;
53641 }
53642
53643 @@ -155,12 +175,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
53644 elf_addr_t __user *u_rand_bytes;
53645 const char *k_platform = ELF_PLATFORM;
53646 const char *k_base_platform = ELF_BASE_PLATFORM;
53647 - unsigned char k_rand_bytes[16];
53648 + u32 k_rand_bytes[4];
53649 int items;
53650 elf_addr_t *elf_info;
53651 int ei_index = 0;
53652 const struct cred *cred = current_cred();
53653 struct vm_area_struct *vma;
53654 + unsigned long saved_auxv[AT_VECTOR_SIZE];
53655
53656 /*
53657 * In some cases (e.g. Hyper-Threading), we want to avoid L1
53658 @@ -202,8 +223,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
53659 * Generate 16 random bytes for userspace PRNG seeding.
53660 */
53661 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
53662 - u_rand_bytes = (elf_addr_t __user *)
53663 - STACK_ALLOC(p, sizeof(k_rand_bytes));
53664 + prandom_seed(k_rand_bytes[0] ^ prandom_u32());
53665 + prandom_seed(k_rand_bytes[1] ^ prandom_u32());
53666 + prandom_seed(k_rand_bytes[2] ^ prandom_u32());
53667 + prandom_seed(k_rand_bytes[3] ^ prandom_u32());
53668 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
53669 + u_rand_bytes = (elf_addr_t __user *) p;
53670 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
53671 return -EFAULT;
53672
53673 @@ -318,9 +343,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
53674 return -EFAULT;
53675 current->mm->env_end = p;
53676
53677 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
53678 +
53679 /* Put the elf_info on the stack in the right place. */
53680 sp = (elf_addr_t __user *)envp + 1;
53681 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
53682 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
53683 return -EFAULT;
53684 return 0;
53685 }
53686 @@ -388,15 +415,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
53687 an ELF header */
53688
53689 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
53690 - struct file *interpreter, unsigned long *interp_map_addr,
53691 - unsigned long no_base)
53692 + struct file *interpreter, unsigned long no_base)
53693 {
53694 struct elf_phdr *elf_phdata;
53695 struct elf_phdr *eppnt;
53696 - unsigned long load_addr = 0;
53697 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
53698 int load_addr_set = 0;
53699 unsigned long last_bss = 0, elf_bss = 0;
53700 - unsigned long error = ~0UL;
53701 + unsigned long error = -EINVAL;
53702 unsigned long total_size;
53703 int retval, i, size;
53704
53705 @@ -442,6 +468,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
53706 goto out_close;
53707 }
53708
53709 +#ifdef CONFIG_PAX_SEGMEXEC
53710 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
53711 + pax_task_size = SEGMEXEC_TASK_SIZE;
53712 +#endif
53713 +
53714 eppnt = elf_phdata;
53715 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
53716 if (eppnt->p_type == PT_LOAD) {
53717 @@ -465,8 +496,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
53718 map_addr = elf_map(interpreter, load_addr + vaddr,
53719 eppnt, elf_prot, elf_type, total_size);
53720 total_size = 0;
53721 - if (!*interp_map_addr)
53722 - *interp_map_addr = map_addr;
53723 error = map_addr;
53724 if (BAD_ADDR(map_addr))
53725 goto out_close;
53726 @@ -485,8 +514,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
53727 k = load_addr + eppnt->p_vaddr;
53728 if (BAD_ADDR(k) ||
53729 eppnt->p_filesz > eppnt->p_memsz ||
53730 - eppnt->p_memsz > TASK_SIZE ||
53731 - TASK_SIZE - eppnt->p_memsz < k) {
53732 + eppnt->p_memsz > pax_task_size ||
53733 + pax_task_size - eppnt->p_memsz < k) {
53734 error = -ENOMEM;
53735 goto out_close;
53736 }
53737 @@ -525,9 +554,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
53738 elf_bss = ELF_PAGESTART(elf_bss + ELF_MIN_ALIGN - 1);
53739
53740 /* Map the last of the bss segment */
53741 - error = vm_brk(elf_bss, last_bss - elf_bss);
53742 - if (BAD_ADDR(error))
53743 - goto out_close;
53744 + if (last_bss > elf_bss) {
53745 + error = vm_brk(elf_bss, last_bss - elf_bss);
53746 + if (BAD_ADDR(error))
53747 + goto out_close;
53748 + }
53749 }
53750
53751 error = load_addr;
53752 @@ -538,6 +569,322 @@ out:
53753 return error;
53754 }
53755
53756 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
53757 +#ifdef CONFIG_PAX_SOFTMODE
53758 +static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
53759 +{
53760 + unsigned long pax_flags = 0UL;
53761 +
53762 +#ifdef CONFIG_PAX_PAGEEXEC
53763 + if (elf_phdata->p_flags & PF_PAGEEXEC)
53764 + pax_flags |= MF_PAX_PAGEEXEC;
53765 +#endif
53766 +
53767 +#ifdef CONFIG_PAX_SEGMEXEC
53768 + if (elf_phdata->p_flags & PF_SEGMEXEC)
53769 + pax_flags |= MF_PAX_SEGMEXEC;
53770 +#endif
53771 +
53772 +#ifdef CONFIG_PAX_EMUTRAMP
53773 + if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
53774 + pax_flags |= MF_PAX_EMUTRAMP;
53775 +#endif
53776 +
53777 +#ifdef CONFIG_PAX_MPROTECT
53778 + if (elf_phdata->p_flags & PF_MPROTECT)
53779 + pax_flags |= MF_PAX_MPROTECT;
53780 +#endif
53781 +
53782 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
53783 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
53784 + pax_flags |= MF_PAX_RANDMMAP;
53785 +#endif
53786 +
53787 + return pax_flags;
53788 +}
53789 +#endif
53790 +
53791 +static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
53792 +{
53793 + unsigned long pax_flags = 0UL;
53794 +
53795 +#ifdef CONFIG_PAX_PAGEEXEC
53796 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
53797 + pax_flags |= MF_PAX_PAGEEXEC;
53798 +#endif
53799 +
53800 +#ifdef CONFIG_PAX_SEGMEXEC
53801 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
53802 + pax_flags |= MF_PAX_SEGMEXEC;
53803 +#endif
53804 +
53805 +#ifdef CONFIG_PAX_EMUTRAMP
53806 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
53807 + pax_flags |= MF_PAX_EMUTRAMP;
53808 +#endif
53809 +
53810 +#ifdef CONFIG_PAX_MPROTECT
53811 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
53812 + pax_flags |= MF_PAX_MPROTECT;
53813 +#endif
53814 +
53815 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
53816 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
53817 + pax_flags |= MF_PAX_RANDMMAP;
53818 +#endif
53819 +
53820 + return pax_flags;
53821 +}
53822 +#endif
53823 +
53824 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
53825 +#ifdef CONFIG_PAX_SOFTMODE
53826 +static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
53827 +{
53828 + unsigned long pax_flags = 0UL;
53829 +
53830 +#ifdef CONFIG_PAX_PAGEEXEC
53831 + if (pax_flags_softmode & MF_PAX_PAGEEXEC)
53832 + pax_flags |= MF_PAX_PAGEEXEC;
53833 +#endif
53834 +
53835 +#ifdef CONFIG_PAX_SEGMEXEC
53836 + if (pax_flags_softmode & MF_PAX_SEGMEXEC)
53837 + pax_flags |= MF_PAX_SEGMEXEC;
53838 +#endif
53839 +
53840 +#ifdef CONFIG_PAX_EMUTRAMP
53841 + if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
53842 + pax_flags |= MF_PAX_EMUTRAMP;
53843 +#endif
53844 +
53845 +#ifdef CONFIG_PAX_MPROTECT
53846 + if (pax_flags_softmode & MF_PAX_MPROTECT)
53847 + pax_flags |= MF_PAX_MPROTECT;
53848 +#endif
53849 +
53850 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
53851 + if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
53852 + pax_flags |= MF_PAX_RANDMMAP;
53853 +#endif
53854 +
53855 + return pax_flags;
53856 +}
53857 +#endif
53858 +
53859 +static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
53860 +{
53861 + unsigned long pax_flags = 0UL;
53862 +
53863 +#ifdef CONFIG_PAX_PAGEEXEC
53864 + if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
53865 + pax_flags |= MF_PAX_PAGEEXEC;
53866 +#endif
53867 +
53868 +#ifdef CONFIG_PAX_SEGMEXEC
53869 + if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
53870 + pax_flags |= MF_PAX_SEGMEXEC;
53871 +#endif
53872 +
53873 +#ifdef CONFIG_PAX_EMUTRAMP
53874 + if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
53875 + pax_flags |= MF_PAX_EMUTRAMP;
53876 +#endif
53877 +
53878 +#ifdef CONFIG_PAX_MPROTECT
53879 + if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
53880 + pax_flags |= MF_PAX_MPROTECT;
53881 +#endif
53882 +
53883 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
53884 + if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
53885 + pax_flags |= MF_PAX_RANDMMAP;
53886 +#endif
53887 +
53888 + return pax_flags;
53889 +}
53890 +#endif
53891 +
53892 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53893 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
53894 +{
53895 + unsigned long pax_flags = 0UL;
53896 +
53897 +#ifdef CONFIG_PAX_EI_PAX
53898 +
53899 +#ifdef CONFIG_PAX_PAGEEXEC
53900 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
53901 + pax_flags |= MF_PAX_PAGEEXEC;
53902 +#endif
53903 +
53904 +#ifdef CONFIG_PAX_SEGMEXEC
53905 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
53906 + pax_flags |= MF_PAX_SEGMEXEC;
53907 +#endif
53908 +
53909 +#ifdef CONFIG_PAX_EMUTRAMP
53910 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
53911 + pax_flags |= MF_PAX_EMUTRAMP;
53912 +#endif
53913 +
53914 +#ifdef CONFIG_PAX_MPROTECT
53915 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
53916 + pax_flags |= MF_PAX_MPROTECT;
53917 +#endif
53918 +
53919 +#ifdef CONFIG_PAX_ASLR
53920 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
53921 + pax_flags |= MF_PAX_RANDMMAP;
53922 +#endif
53923 +
53924 +#else
53925 +
53926 +#ifdef CONFIG_PAX_PAGEEXEC
53927 + pax_flags |= MF_PAX_PAGEEXEC;
53928 +#endif
53929 +
53930 +#ifdef CONFIG_PAX_SEGMEXEC
53931 + pax_flags |= MF_PAX_SEGMEXEC;
53932 +#endif
53933 +
53934 +#ifdef CONFIG_PAX_MPROTECT
53935 + pax_flags |= MF_PAX_MPROTECT;
53936 +#endif
53937 +
53938 +#ifdef CONFIG_PAX_RANDMMAP
53939 + if (randomize_va_space)
53940 + pax_flags |= MF_PAX_RANDMMAP;
53941 +#endif
53942 +
53943 +#endif
53944 +
53945 + return pax_flags;
53946 +}
53947 +
53948 +static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
53949 +{
53950 +
53951 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
53952 + unsigned long i;
53953 +
53954 + for (i = 0UL; i < elf_ex->e_phnum; i++)
53955 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
53956 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
53957 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
53958 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
53959 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
53960 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
53961 + return ~0UL;
53962 +
53963 +#ifdef CONFIG_PAX_SOFTMODE
53964 + if (pax_softmode)
53965 + return pax_parse_pt_pax_softmode(&elf_phdata[i]);
53966 + else
53967 +#endif
53968 +
53969 + return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
53970 + break;
53971 + }
53972 +#endif
53973 +
53974 + return ~0UL;
53975 +}
53976 +
53977 +static unsigned long pax_parse_xattr_pax(struct file * const file)
53978 +{
53979 +
53980 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
53981 + ssize_t xattr_size, i;
53982 + unsigned char xattr_value[sizeof("pemrs") - 1];
53983 + unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
53984 +
53985 + xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
53986 + switch (xattr_size) {
53987 + default:
53988 + return ~0UL;
53989 +
53990 + case -ENODATA:
53991 + break;
53992 +
53993 + case 0 ... sizeof xattr_value:
53994 + for (i = 0; i < xattr_size; i++)
53995 + switch (xattr_value[i]) {
53996 + default:
53997 + return ~0UL;
53998 +
53999 +#define parse_flag(option1, option2, flag) \
54000 + case option1: \
54001 + if (pax_flags_hardmode & MF_PAX_##flag) \
54002 + return ~0UL; \
54003 + pax_flags_hardmode |= MF_PAX_##flag; \
54004 + break; \
54005 + case option2: \
54006 + if (pax_flags_softmode & MF_PAX_##flag) \
54007 + return ~0UL; \
54008 + pax_flags_softmode |= MF_PAX_##flag; \
54009 + break;
54010 +
54011 + parse_flag('p', 'P', PAGEEXEC);
54012 + parse_flag('e', 'E', EMUTRAMP);
54013 + parse_flag('m', 'M', MPROTECT);
54014 + parse_flag('r', 'R', RANDMMAP);
54015 + parse_flag('s', 'S', SEGMEXEC);
54016 +
54017 +#undef parse_flag
54018 + }
54019 + break;
54020 + }
54021 +
54022 + if (pax_flags_hardmode & pax_flags_softmode)
54023 + return ~0UL;
54024 +
54025 +#ifdef CONFIG_PAX_SOFTMODE
54026 + if (pax_softmode)
54027 + return pax_parse_xattr_pax_softmode(pax_flags_softmode);
54028 + else
54029 +#endif
54030 +
54031 + return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
54032 +#else
54033 + return ~0UL;
54034 +#endif
54035 +
54036 +}
54037 +
54038 +static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
54039 +{
54040 + unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
54041 +
54042 + pax_flags = pax_parse_ei_pax(elf_ex);
54043 + pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
54044 + xattr_pax_flags = pax_parse_xattr_pax(file);
54045 +
54046 + if (pt_pax_flags == ~0UL)
54047 + pt_pax_flags = xattr_pax_flags;
54048 + else if (xattr_pax_flags == ~0UL)
54049 + xattr_pax_flags = pt_pax_flags;
54050 + if (pt_pax_flags != xattr_pax_flags)
54051 + return -EINVAL;
54052 + if (pt_pax_flags != ~0UL)
54053 + pax_flags = pt_pax_flags;
54054 +
54055 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
54056 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
54057 + if ((__supported_pte_mask & _PAGE_NX))
54058 + pax_flags &= ~MF_PAX_SEGMEXEC;
54059 + else
54060 + pax_flags &= ~MF_PAX_PAGEEXEC;
54061 + }
54062 +#endif
54063 +
54064 + if (0 > pax_check_flags(&pax_flags))
54065 + return -EINVAL;
54066 +
54067 + current->mm->pax_flags = pax_flags;
54068 + return 0;
54069 +}
54070 +#endif
54071 +
54072 /*
54073 * These are the functions used to load ELF style executables and shared
54074 * libraries. There is no binary dependent code anywhere else.
54075 @@ -554,6 +901,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
54076 {
54077 unsigned int random_variable = 0;
54078
54079 +#ifdef CONFIG_PAX_RANDUSTACK
54080 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
54081 + return stack_top - current->mm->delta_stack;
54082 +#endif
54083 +
54084 if ((current->flags & PF_RANDOMIZE) &&
54085 !(current->personality & ADDR_NO_RANDOMIZE)) {
54086 random_variable = get_random_int() & STACK_RND_MASK;
54087 @@ -572,7 +924,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
54088 unsigned long load_addr = 0, load_bias = 0;
54089 int load_addr_set = 0;
54090 char * elf_interpreter = NULL;
54091 - unsigned long error;
54092 + unsigned long error = 0;
54093 struct elf_phdr *elf_ppnt, *elf_phdata;
54094 unsigned long elf_bss, elf_brk;
54095 int retval, i;
54096 @@ -582,12 +934,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
54097 unsigned long start_code, end_code, start_data, end_data;
54098 unsigned long reloc_func_desc __maybe_unused = 0;
54099 int executable_stack = EXSTACK_DEFAULT;
54100 - unsigned long def_flags = 0;
54101 struct pt_regs *regs = current_pt_regs();
54102 struct {
54103 struct elfhdr elf_ex;
54104 struct elfhdr interp_elf_ex;
54105 } *loc;
54106 + unsigned long pax_task_size;
54107
54108 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
54109 if (!loc) {
54110 @@ -723,11 +1075,82 @@ static int load_elf_binary(struct linux_binprm *bprm)
54111 goto out_free_dentry;
54112
54113 /* OK, This is the point of no return */
54114 - current->mm->def_flags = def_flags;
54115 + current->mm->def_flags = 0;
54116
54117 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
54118 may depend on the personality. */
54119 SET_PERSONALITY(loc->elf_ex);
54120 +
54121 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54122 + current->mm->pax_flags = 0UL;
54123 +#endif
54124 +
54125 +#ifdef CONFIG_PAX_DLRESOLVE
54126 + current->mm->call_dl_resolve = 0UL;
54127 +#endif
54128 +
54129 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
54130 + current->mm->call_syscall = 0UL;
54131 +#endif
54132 +
54133 +#ifdef CONFIG_PAX_ASLR
54134 + current->mm->delta_mmap = 0UL;
54135 + current->mm->delta_stack = 0UL;
54136 +#endif
54137 +
54138 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54139 + if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
54140 + send_sig(SIGKILL, current, 0);
54141 + goto out_free_dentry;
54142 + }
54143 +#endif
54144 +
54145 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54146 + pax_set_initial_flags(bprm);
54147 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
54148 + if (pax_set_initial_flags_func)
54149 + (pax_set_initial_flags_func)(bprm);
54150 +#endif
54151 +
54152 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54153 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
54154 + current->mm->context.user_cs_limit = PAGE_SIZE;
54155 + current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
54156 + }
54157 +#endif
54158 +
54159 +#ifdef CONFIG_PAX_SEGMEXEC
54160 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
54161 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
54162 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
54163 + pax_task_size = SEGMEXEC_TASK_SIZE;
54164 + current->mm->def_flags |= VM_NOHUGEPAGE;
54165 + } else
54166 +#endif
54167 +
54168 + pax_task_size = TASK_SIZE;
54169 +
54170 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
54171 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
54172 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
54173 + put_cpu();
54174 + }
54175 +#endif
54176 +
54177 +#ifdef CONFIG_PAX_ASLR
54178 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
54179 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
54180 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
54181 + }
54182 +#endif
54183 +
54184 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
54185 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
54186 + executable_stack = EXSTACK_DISABLE_X;
54187 + current->personality &= ~READ_IMPLIES_EXEC;
54188 + } else
54189 +#endif
54190 +
54191 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
54192 current->personality |= READ_IMPLIES_EXEC;
54193
54194 @@ -817,6 +1240,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
54195 #else
54196 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
54197 #endif
54198 +
54199 +#ifdef CONFIG_PAX_RANDMMAP
54200 + /* PaX: randomize base address at the default exe base if requested */
54201 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
54202 +#ifdef CONFIG_SPARC64
54203 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
54204 +#else
54205 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
54206 +#endif
54207 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
54208 + elf_flags |= MAP_FIXED;
54209 + }
54210 +#endif
54211 +
54212 }
54213
54214 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
54215 @@ -849,9 +1286,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
54216 * allowed task size. Note that p_filesz must always be
54217 * <= p_memsz so it is only necessary to check p_memsz.
54218 */
54219 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
54220 - elf_ppnt->p_memsz > TASK_SIZE ||
54221 - TASK_SIZE - elf_ppnt->p_memsz < k) {
54222 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
54223 + elf_ppnt->p_memsz > pax_task_size ||
54224 + pax_task_size - elf_ppnt->p_memsz < k) {
54225 /* set_brk can never work. Avoid overflows. */
54226 send_sig(SIGKILL, current, 0);
54227 retval = -EINVAL;
54228 @@ -890,17 +1327,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
54229 goto out_free_dentry;
54230 }
54231 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
54232 - send_sig(SIGSEGV, current, 0);
54233 - retval = -EFAULT; /* Nobody gets to see this, but.. */
54234 - goto out_free_dentry;
54235 + /*
54236 + * This bss-zeroing can fail if the ELF
54237 + * file specifies odd protections. So
54238 + * we don't check the return value
54239 + */
54240 }
54241
54242 +#ifdef CONFIG_PAX_RANDMMAP
54243 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
54244 + unsigned long start, size, flags;
54245 + vm_flags_t vm_flags;
54246 +
54247 + start = ELF_PAGEALIGN(elf_brk);
54248 + size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
54249 + flags = MAP_FIXED | MAP_PRIVATE;
54250 + vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
54251 +
54252 + down_write(&current->mm->mmap_sem);
54253 + start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
54254 + retval = -ENOMEM;
54255 + if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
54256 +// if (current->personality & ADDR_NO_RANDOMIZE)
54257 +// vm_flags |= VM_READ | VM_MAYREAD;
54258 + start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
54259 + retval = IS_ERR_VALUE(start) ? start : 0;
54260 + }
54261 + up_write(&current->mm->mmap_sem);
54262 + if (retval == 0)
54263 + retval = set_brk(start + size, start + size + PAGE_SIZE);
54264 + if (retval < 0) {
54265 + send_sig(SIGKILL, current, 0);
54266 + goto out_free_dentry;
54267 + }
54268 + }
54269 +#endif
54270 +
54271 if (elf_interpreter) {
54272 - unsigned long interp_map_addr = 0;
54273 -
54274 elf_entry = load_elf_interp(&loc->interp_elf_ex,
54275 interpreter,
54276 - &interp_map_addr,
54277 load_bias);
54278 if (!IS_ERR((void *)elf_entry)) {
54279 /*
54280 @@ -1122,7 +1587,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
54281 * Decide what to dump of a segment, part, all or none.
54282 */
54283 static unsigned long vma_dump_size(struct vm_area_struct *vma,
54284 - unsigned long mm_flags)
54285 + unsigned long mm_flags, long signr)
54286 {
54287 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
54288
54289 @@ -1160,7 +1625,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
54290 if (vma->vm_file == NULL)
54291 return 0;
54292
54293 - if (FILTER(MAPPED_PRIVATE))
54294 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
54295 goto whole;
54296
54297 /*
54298 @@ -1385,9 +1850,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
54299 {
54300 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
54301 int i = 0;
54302 - do
54303 + do {
54304 i += 2;
54305 - while (auxv[i - 2] != AT_NULL);
54306 + } while (auxv[i - 2] != AT_NULL);
54307 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
54308 }
54309
54310 @@ -1396,7 +1861,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
54311 {
54312 mm_segment_t old_fs = get_fs();
54313 set_fs(KERNEL_DS);
54314 - copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
54315 + copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
54316 set_fs(old_fs);
54317 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
54318 }
54319 @@ -2023,14 +2488,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
54320 }
54321
54322 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
54323 - unsigned long mm_flags)
54324 + struct coredump_params *cprm)
54325 {
54326 struct vm_area_struct *vma;
54327 size_t size = 0;
54328
54329 for (vma = first_vma(current, gate_vma); vma != NULL;
54330 vma = next_vma(vma, gate_vma))
54331 - size += vma_dump_size(vma, mm_flags);
54332 + size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
54333 return size;
54334 }
54335
54336 @@ -2123,7 +2588,7 @@ static int elf_core_dump(struct coredump_params *cprm)
54337
54338 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
54339
54340 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
54341 + offset += elf_core_vma_data_size(gate_vma, cprm);
54342 offset += elf_core_extra_data_size();
54343 e_shoff = offset;
54344
54345 @@ -2137,10 +2602,12 @@ static int elf_core_dump(struct coredump_params *cprm)
54346 offset = dataoff;
54347
54348 size += sizeof(*elf);
54349 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
54350 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
54351 goto end_coredump;
54352
54353 size += sizeof(*phdr4note);
54354 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
54355 if (size > cprm->limit
54356 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
54357 goto end_coredump;
54358 @@ -2154,7 +2621,7 @@ static int elf_core_dump(struct coredump_params *cprm)
54359 phdr.p_offset = offset;
54360 phdr.p_vaddr = vma->vm_start;
54361 phdr.p_paddr = 0;
54362 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
54363 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
54364 phdr.p_memsz = vma->vm_end - vma->vm_start;
54365 offset += phdr.p_filesz;
54366 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
54367 @@ -2165,6 +2632,7 @@ static int elf_core_dump(struct coredump_params *cprm)
54368 phdr.p_align = ELF_EXEC_PAGESIZE;
54369
54370 size += sizeof(phdr);
54371 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
54372 if (size > cprm->limit
54373 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
54374 goto end_coredump;
54375 @@ -2189,7 +2657,7 @@ static int elf_core_dump(struct coredump_params *cprm)
54376 unsigned long addr;
54377 unsigned long end;
54378
54379 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
54380 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
54381
54382 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
54383 struct page *page;
54384 @@ -2198,6 +2666,7 @@ static int elf_core_dump(struct coredump_params *cprm)
54385 page = get_dump_page(addr);
54386 if (page) {
54387 void *kaddr = kmap(page);
54388 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
54389 stop = ((size += PAGE_SIZE) > cprm->limit) ||
54390 !dump_write(cprm->file, kaddr,
54391 PAGE_SIZE);
54392 @@ -2215,6 +2684,7 @@ static int elf_core_dump(struct coredump_params *cprm)
54393
54394 if (e_phnum == PN_XNUM) {
54395 size += sizeof(*shdr4extnum);
54396 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
54397 if (size > cprm->limit
54398 || !dump_write(cprm->file, shdr4extnum,
54399 sizeof(*shdr4extnum)))
54400 @@ -2235,6 +2705,167 @@ out:
54401
54402 #endif /* CONFIG_ELF_CORE */
54403
54404 +#ifdef CONFIG_PAX_MPROTECT
54405 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
54406 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
54407 + * we'll remove VM_MAYWRITE for good on RELRO segments.
54408 + *
54409 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
54410 + * basis because we want to allow the common case and not the special ones.
54411 + */
54412 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
54413 +{
54414 + struct elfhdr elf_h;
54415 + struct elf_phdr elf_p;
54416 + unsigned long i;
54417 + unsigned long oldflags;
54418 + bool is_textrel_rw, is_textrel_rx, is_relro;
54419 +
54420 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT) || !vma->vm_file)
54421 + return;
54422 +
54423 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
54424 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
54425 +
54426 +#ifdef CONFIG_PAX_ELFRELOCS
54427 + /* possible TEXTREL */
54428 + is_textrel_rw = !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
54429 + is_textrel_rx = vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
54430 +#else
54431 + is_textrel_rw = false;
54432 + is_textrel_rx = false;
54433 +#endif
54434 +
54435 + /* possible RELRO */
54436 + is_relro = vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
54437 +
54438 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
54439 + return;
54440 +
54441 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
54442 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
54443 +
54444 +#ifdef CONFIG_PAX_ETEXECRELOCS
54445 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
54446 +#else
54447 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
54448 +#endif
54449 +
54450 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
54451 + !elf_check_arch(&elf_h) ||
54452 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
54453 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
54454 + return;
54455 +
54456 + for (i = 0UL; i < elf_h.e_phnum; i++) {
54457 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
54458 + return;
54459 + switch (elf_p.p_type) {
54460 + case PT_DYNAMIC:
54461 + if (!is_textrel_rw && !is_textrel_rx)
54462 + continue;
54463 + i = 0UL;
54464 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
54465 + elf_dyn dyn;
54466 +
54467 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
54468 + break;
54469 + if (dyn.d_tag == DT_NULL)
54470 + break;
54471 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
54472 + gr_log_textrel(vma);
54473 + if (is_textrel_rw)
54474 + vma->vm_flags |= VM_MAYWRITE;
54475 + else
54476 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
54477 + vma->vm_flags &= ~VM_MAYWRITE;
54478 + break;
54479 + }
54480 + i++;
54481 + }
54482 + is_textrel_rw = false;
54483 + is_textrel_rx = false;
54484 + continue;
54485 +
54486 + case PT_GNU_RELRO:
54487 + if (!is_relro)
54488 + continue;
54489 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
54490 + vma->vm_flags &= ~VM_MAYWRITE;
54491 + is_relro = false;
54492 + continue;
54493 +
54494 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
54495 + case PT_PAX_FLAGS: {
54496 + const char *msg_mprotect = "", *msg_emutramp = "";
54497 + char *buffer_lib, *buffer_exe;
54498 +
54499 + if (elf_p.p_flags & PF_NOMPROTECT)
54500 + msg_mprotect = "MPROTECT disabled";
54501 +
54502 +#ifdef CONFIG_PAX_EMUTRAMP
54503 + if (!(vma->vm_mm->pax_flags & MF_PAX_EMUTRAMP) && !(elf_p.p_flags & PF_NOEMUTRAMP))
54504 + msg_emutramp = "EMUTRAMP enabled";
54505 +#endif
54506 +
54507 + if (!msg_mprotect[0] && !msg_emutramp[0])
54508 + continue;
54509 +
54510 + if (!printk_ratelimit())
54511 + continue;
54512 +
54513 + buffer_lib = (char *)__get_free_page(GFP_KERNEL);
54514 + buffer_exe = (char *)__get_free_page(GFP_KERNEL);
54515 + if (buffer_lib && buffer_exe) {
54516 + char *path_lib, *path_exe;
54517 +
54518 + path_lib = pax_get_path(&vma->vm_file->f_path, buffer_lib, PAGE_SIZE);
54519 + path_exe = pax_get_path(&vma->vm_mm->exe_file->f_path, buffer_exe, PAGE_SIZE);
54520 +
54521 + pr_info("PAX: %s wants %s%s%s on %s\n", path_lib, msg_mprotect,
54522 + (msg_mprotect[0] && msg_emutramp[0] ? " and " : ""), msg_emutramp, path_exe);
54523 +
54524 + }
54525 + free_page((unsigned long)buffer_exe);
54526 + free_page((unsigned long)buffer_lib);
54527 + continue;
54528 + }
54529 +#endif
54530 +
54531 + }
54532 + }
54533 +}
54534 +#endif
54535 +
54536 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54537 +
54538 +extern int grsec_enable_log_rwxmaps;
54539 +
54540 +static void elf_handle_mmap(struct file *file)
54541 +{
54542 + struct elfhdr elf_h;
54543 + struct elf_phdr elf_p;
54544 + unsigned long i;
54545 +
54546 + if (!grsec_enable_log_rwxmaps)
54547 + return;
54548 +
54549 + if (sizeof(elf_h) != kernel_read(file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
54550 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
54551 + (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) || !elf_check_arch(&elf_h) ||
54552 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
54553 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
54554 + return;
54555 +
54556 + for (i = 0UL; i < elf_h.e_phnum; i++) {
54557 + if (sizeof(elf_p) != kernel_read(file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
54558 + return;
54559 + if (elf_p.p_type == PT_GNU_STACK && (elf_p.p_flags & PF_X))
54560 + gr_log_ptgnustack(file);
54561 + }
54562 +}
54563 +#endif
54564 +
54565 static int __init init_elf_binfmt(void)
54566 {
54567 register_binfmt(&elf_format);
54568 diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
54569 index d50bbe5..af3b649 100644
54570 --- a/fs/binfmt_flat.c
54571 +++ b/fs/binfmt_flat.c
54572 @@ -566,7 +566,9 @@ static int load_flat_file(struct linux_binprm * bprm,
54573 realdatastart = (unsigned long) -ENOMEM;
54574 printk("Unable to allocate RAM for process data, errno %d\n",
54575 (int)-realdatastart);
54576 + down_write(&current->mm->mmap_sem);
54577 vm_munmap(textpos, text_len);
54578 + up_write(&current->mm->mmap_sem);
54579 ret = realdatastart;
54580 goto err;
54581 }
54582 @@ -590,8 +592,10 @@ static int load_flat_file(struct linux_binprm * bprm,
54583 }
54584 if (IS_ERR_VALUE(result)) {
54585 printk("Unable to read data+bss, errno %d\n", (int)-result);
54586 + down_write(&current->mm->mmap_sem);
54587 vm_munmap(textpos, text_len);
54588 vm_munmap(realdatastart, len);
54589 + up_write(&current->mm->mmap_sem);
54590 ret = result;
54591 goto err;
54592 }
54593 @@ -653,8 +657,10 @@ static int load_flat_file(struct linux_binprm * bprm,
54594 }
54595 if (IS_ERR_VALUE(result)) {
54596 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
54597 + down_write(&current->mm->mmap_sem);
54598 vm_munmap(textpos, text_len + data_len + extra +
54599 MAX_SHARED_LIBS * sizeof(unsigned long));
54600 + up_write(&current->mm->mmap_sem);
54601 ret = result;
54602 goto err;
54603 }
54604 diff --git a/fs/bio.c b/fs/bio.c
54605 index ea5035d..a2932eb 100644
54606 --- a/fs/bio.c
54607 +++ b/fs/bio.c
54608 @@ -1106,7 +1106,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
54609 /*
54610 * Overflow, abort
54611 */
54612 - if (end < start)
54613 + if (end < start || end - start > INT_MAX - nr_pages)
54614 return ERR_PTR(-EINVAL);
54615
54616 nr_pages += end - start;
54617 @@ -1240,7 +1240,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
54618 /*
54619 * Overflow, abort
54620 */
54621 - if (end < start)
54622 + if (end < start || end - start > INT_MAX - nr_pages)
54623 return ERR_PTR(-EINVAL);
54624
54625 nr_pages += end - start;
54626 @@ -1502,7 +1502,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
54627 const int read = bio_data_dir(bio) == READ;
54628 struct bio_map_data *bmd = bio->bi_private;
54629 int i;
54630 - char *p = bmd->sgvecs[0].iov_base;
54631 + char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
54632
54633 bio_for_each_segment_all(bvec, bio, i) {
54634 char *addr = page_address(bvec->bv_page);
54635 diff --git a/fs/block_dev.c b/fs/block_dev.c
54636 index 1e86823..8e34695 100644
54637 --- a/fs/block_dev.c
54638 +++ b/fs/block_dev.c
54639 @@ -637,7 +637,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
54640 else if (bdev->bd_contains == bdev)
54641 return true; /* is a whole device which isn't held */
54642
54643 - else if (whole->bd_holder == bd_may_claim)
54644 + else if (whole->bd_holder == (void *)bd_may_claim)
54645 return true; /* is a partition of a device that is being partitioned */
54646 else if (whole->bd_holder != NULL)
54647 return false; /* is a partition of a held device */
54648 diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
54649 index b544a44..f3fb987 100644
54650 --- a/fs/btrfs/ctree.c
54651 +++ b/fs/btrfs/ctree.c
54652 @@ -1028,9 +1028,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
54653 free_extent_buffer(buf);
54654 add_root_to_dirty_list(root);
54655 } else {
54656 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
54657 - parent_start = parent->start;
54658 - else
54659 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
54660 + if (parent)
54661 + parent_start = parent->start;
54662 + else
54663 + parent_start = 0;
54664 + } else
54665 parent_start = 0;
54666
54667 WARN_ON(trans->transid != btrfs_header_generation(parent));
54668 diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
54669 index cbd9523..5cca781 100644
54670 --- a/fs/btrfs/delayed-inode.c
54671 +++ b/fs/btrfs/delayed-inode.c
54672 @@ -459,7 +459,7 @@ static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node,
54673
54674 static void finish_one_item(struct btrfs_delayed_root *delayed_root)
54675 {
54676 - int seq = atomic_inc_return(&delayed_root->items_seq);
54677 + int seq = atomic_inc_return_unchecked(&delayed_root->items_seq);
54678 if ((atomic_dec_return(&delayed_root->items) <
54679 BTRFS_DELAYED_BACKGROUND || seq % BTRFS_DELAYED_BATCH == 0) &&
54680 waitqueue_active(&delayed_root->wait))
54681 @@ -1378,7 +1378,7 @@ void btrfs_assert_delayed_root_empty(struct btrfs_root *root)
54682 static int refs_newer(struct btrfs_delayed_root *delayed_root,
54683 int seq, int count)
54684 {
54685 - int val = atomic_read(&delayed_root->items_seq);
54686 + int val = atomic_read_unchecked(&delayed_root->items_seq);
54687
54688 if (val < seq || val >= seq + count)
54689 return 1;
54690 @@ -1395,7 +1395,7 @@ void btrfs_balance_delayed_items(struct btrfs_root *root)
54691 if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND)
54692 return;
54693
54694 - seq = atomic_read(&delayed_root->items_seq);
54695 + seq = atomic_read_unchecked(&delayed_root->items_seq);
54696
54697 if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) {
54698 int ret;
54699 diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
54700 index a4b38f9..f86a509 100644
54701 --- a/fs/btrfs/delayed-inode.h
54702 +++ b/fs/btrfs/delayed-inode.h
54703 @@ -43,7 +43,7 @@ struct btrfs_delayed_root {
54704 */
54705 struct list_head prepare_list;
54706 atomic_t items; /* for delayed items */
54707 - atomic_t items_seq; /* for delayed items */
54708 + atomic_unchecked_t items_seq; /* for delayed items */
54709 int nodes; /* for delayed nodes */
54710 wait_queue_head_t wait;
54711 };
54712 @@ -87,7 +87,7 @@ static inline void btrfs_init_delayed_root(
54713 struct btrfs_delayed_root *delayed_root)
54714 {
54715 atomic_set(&delayed_root->items, 0);
54716 - atomic_set(&delayed_root->items_seq, 0);
54717 + atomic_set_unchecked(&delayed_root->items_seq, 0);
54718 delayed_root->nodes = 0;
54719 spin_lock_init(&delayed_root->lock);
54720 init_waitqueue_head(&delayed_root->wait);
54721 diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
54722 index 8747feb..ad1655c 100644
54723 --- a/fs/btrfs/ioctl.c
54724 +++ b/fs/btrfs/ioctl.c
54725 @@ -3465,9 +3465,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
54726 for (i = 0; i < num_types; i++) {
54727 struct btrfs_space_info *tmp;
54728
54729 + /* Don't copy in more than we allocated */
54730 if (!slot_count)
54731 break;
54732
54733 + slot_count--;
54734 +
54735 info = NULL;
54736 rcu_read_lock();
54737 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
54738 @@ -3489,10 +3492,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
54739 memcpy(dest, &space, sizeof(space));
54740 dest++;
54741 space_args.total_spaces++;
54742 - slot_count--;
54743 }
54744 - if (!slot_count)
54745 - break;
54746 }
54747 up_read(&info->groups_sem);
54748 }
54749 diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
54750 index e913328..a34fb36 100644
54751 --- a/fs/btrfs/super.c
54752 +++ b/fs/btrfs/super.c
54753 @@ -266,7 +266,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
54754 function, line, errstr);
54755 return;
54756 }
54757 - ACCESS_ONCE(trans->transaction->aborted) = errno;
54758 + ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
54759 /* Wake up anybody who may be waiting on this transaction */
54760 wake_up(&root->fs_info->transaction_wait);
54761 wake_up(&root->fs_info->transaction_blocked_wait);
54762 diff --git a/fs/buffer.c b/fs/buffer.c
54763 index 6024877..7bd000a 100644
54764 --- a/fs/buffer.c
54765 +++ b/fs/buffer.c
54766 @@ -3426,7 +3426,7 @@ void __init buffer_init(void)
54767 bh_cachep = kmem_cache_create("buffer_head",
54768 sizeof(struct buffer_head), 0,
54769 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
54770 - SLAB_MEM_SPREAD),
54771 + SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
54772 NULL);
54773
54774 /*
54775 diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
54776 index 622f469..e8d2d55 100644
54777 --- a/fs/cachefiles/bind.c
54778 +++ b/fs/cachefiles/bind.c
54779 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
54780 args);
54781
54782 /* start by checking things over */
54783 - ASSERT(cache->fstop_percent >= 0 &&
54784 - cache->fstop_percent < cache->fcull_percent &&
54785 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
54786 cache->fcull_percent < cache->frun_percent &&
54787 cache->frun_percent < 100);
54788
54789 - ASSERT(cache->bstop_percent >= 0 &&
54790 - cache->bstop_percent < cache->bcull_percent &&
54791 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
54792 cache->bcull_percent < cache->brun_percent &&
54793 cache->brun_percent < 100);
54794
54795 diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
54796 index 0a1467b..6a53245 100644
54797 --- a/fs/cachefiles/daemon.c
54798 +++ b/fs/cachefiles/daemon.c
54799 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
54800 if (n > buflen)
54801 return -EMSGSIZE;
54802
54803 - if (copy_to_user(_buffer, buffer, n) != 0)
54804 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
54805 return -EFAULT;
54806
54807 return n;
54808 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
54809 if (test_bit(CACHEFILES_DEAD, &cache->flags))
54810 return -EIO;
54811
54812 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
54813 + if (datalen > PAGE_SIZE - 1)
54814 return -EOPNOTSUPP;
54815
54816 /* drag the command string into the kernel so we can parse it */
54817 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
54818 if (args[0] != '%' || args[1] != '\0')
54819 return -EINVAL;
54820
54821 - if (fstop < 0 || fstop >= cache->fcull_percent)
54822 + if (fstop >= cache->fcull_percent)
54823 return cachefiles_daemon_range_error(cache, args);
54824
54825 cache->fstop_percent = fstop;
54826 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
54827 if (args[0] != '%' || args[1] != '\0')
54828 return -EINVAL;
54829
54830 - if (bstop < 0 || bstop >= cache->bcull_percent)
54831 + if (bstop >= cache->bcull_percent)
54832 return cachefiles_daemon_range_error(cache, args);
54833
54834 cache->bstop_percent = bstop;
54835 diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
54836 index 5349473..d6c0b93 100644
54837 --- a/fs/cachefiles/internal.h
54838 +++ b/fs/cachefiles/internal.h
54839 @@ -59,7 +59,7 @@ struct cachefiles_cache {
54840 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
54841 struct rb_root active_nodes; /* active nodes (can't be culled) */
54842 rwlock_t active_lock; /* lock for active_nodes */
54843 - atomic_t gravecounter; /* graveyard uniquifier */
54844 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
54845 unsigned frun_percent; /* when to stop culling (% files) */
54846 unsigned fcull_percent; /* when to start culling (% files) */
54847 unsigned fstop_percent; /* when to stop allocating (% files) */
54848 @@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
54849 * proc.c
54850 */
54851 #ifdef CONFIG_CACHEFILES_HISTOGRAM
54852 -extern atomic_t cachefiles_lookup_histogram[HZ];
54853 -extern atomic_t cachefiles_mkdir_histogram[HZ];
54854 -extern atomic_t cachefiles_create_histogram[HZ];
54855 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
54856 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
54857 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
54858
54859 extern int __init cachefiles_proc_init(void);
54860 extern void cachefiles_proc_cleanup(void);
54861 static inline
54862 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
54863 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
54864 {
54865 unsigned long jif = jiffies - start_jif;
54866 if (jif >= HZ)
54867 jif = HZ - 1;
54868 - atomic_inc(&histogram[jif]);
54869 + atomic_inc_unchecked(&histogram[jif]);
54870 }
54871
54872 #else
54873 diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
54874 index f4a08d7..5aa4599 100644
54875 --- a/fs/cachefiles/namei.c
54876 +++ b/fs/cachefiles/namei.c
54877 @@ -317,7 +317,7 @@ try_again:
54878 /* first step is to make up a grave dentry in the graveyard */
54879 sprintf(nbuffer, "%08x%08x",
54880 (uint32_t) get_seconds(),
54881 - (uint32_t) atomic_inc_return(&cache->gravecounter));
54882 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
54883
54884 /* do the multiway lock magic */
54885 trap = lock_rename(cache->graveyard, dir);
54886 diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
54887 index eccd339..4c1d995 100644
54888 --- a/fs/cachefiles/proc.c
54889 +++ b/fs/cachefiles/proc.c
54890 @@ -14,9 +14,9 @@
54891 #include <linux/seq_file.h>
54892 #include "internal.h"
54893
54894 -atomic_t cachefiles_lookup_histogram[HZ];
54895 -atomic_t cachefiles_mkdir_histogram[HZ];
54896 -atomic_t cachefiles_create_histogram[HZ];
54897 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
54898 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
54899 +atomic_unchecked_t cachefiles_create_histogram[HZ];
54900
54901 /*
54902 * display the latency histogram
54903 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
54904 return 0;
54905 default:
54906 index = (unsigned long) v - 3;
54907 - x = atomic_read(&cachefiles_lookup_histogram[index]);
54908 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
54909 - z = atomic_read(&cachefiles_create_histogram[index]);
54910 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
54911 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
54912 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
54913 if (x == 0 && y == 0 && z == 0)
54914 return 0;
54915
54916 diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
54917 index ebaff36..7e3ea26 100644
54918 --- a/fs/cachefiles/rdwr.c
54919 +++ b/fs/cachefiles/rdwr.c
54920 @@ -950,7 +950,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
54921 old_fs = get_fs();
54922 set_fs(KERNEL_DS);
54923 ret = file->f_op->write(
54924 - file, (const void __user *) data, len, &pos);
54925 + file, (const void __force_user *) data, len, &pos);
54926 set_fs(old_fs);
54927 kunmap(page);
54928 file_end_write(file);
54929 diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
54930 index 868b61d..58835a5 100644
54931 --- a/fs/ceph/dir.c
54932 +++ b/fs/ceph/dir.c
54933 @@ -240,7 +240,7 @@ static int ceph_readdir(struct file *file, struct dir_context *ctx)
54934 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
54935 struct ceph_mds_client *mdsc = fsc->mdsc;
54936 unsigned frag = fpos_frag(ctx->pos);
54937 - int off = fpos_off(ctx->pos);
54938 + unsigned int off = fpos_off(ctx->pos);
54939 int err;
54940 u32 ftype;
54941 struct ceph_mds_reply_info_parsed *rinfo;
54942 diff --git a/fs/ceph/super.c b/fs/ceph/super.c
54943 index 6a0951e..03fac6d 100644
54944 --- a/fs/ceph/super.c
54945 +++ b/fs/ceph/super.c
54946 @@ -870,7 +870,7 @@ static int ceph_compare_super(struct super_block *sb, void *data)
54947 /*
54948 * construct our own bdi so we can control readahead, etc.
54949 */
54950 -static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
54951 +static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
54952
54953 static int ceph_register_bdi(struct super_block *sb,
54954 struct ceph_fs_client *fsc)
54955 @@ -887,7 +887,7 @@ static int ceph_register_bdi(struct super_block *sb,
54956 default_backing_dev_info.ra_pages;
54957
54958 err = bdi_register(&fsc->backing_dev_info, NULL, "ceph-%ld",
54959 - atomic_long_inc_return(&bdi_seq));
54960 + atomic_long_inc_return_unchecked(&bdi_seq));
54961 if (!err)
54962 sb->s_bdi = &fsc->backing_dev_info;
54963 return err;
54964 diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
54965 index f3ac415..3d2420c 100644
54966 --- a/fs/cifs/cifs_debug.c
54967 +++ b/fs/cifs/cifs_debug.c
54968 @@ -286,8 +286,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
54969
54970 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
54971 #ifdef CONFIG_CIFS_STATS2
54972 - atomic_set(&totBufAllocCount, 0);
54973 - atomic_set(&totSmBufAllocCount, 0);
54974 + atomic_set_unchecked(&totBufAllocCount, 0);
54975 + atomic_set_unchecked(&totSmBufAllocCount, 0);
54976 #endif /* CONFIG_CIFS_STATS2 */
54977 spin_lock(&cifs_tcp_ses_lock);
54978 list_for_each(tmp1, &cifs_tcp_ses_list) {
54979 @@ -300,7 +300,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
54980 tcon = list_entry(tmp3,
54981 struct cifs_tcon,
54982 tcon_list);
54983 - atomic_set(&tcon->num_smbs_sent, 0);
54984 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
54985 if (server->ops->clear_stats)
54986 server->ops->clear_stats(tcon);
54987 }
54988 @@ -332,8 +332,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
54989 smBufAllocCount.counter, cifs_min_small);
54990 #ifdef CONFIG_CIFS_STATS2
54991 seq_printf(m, "Total Large %d Small %d Allocations\n",
54992 - atomic_read(&totBufAllocCount),
54993 - atomic_read(&totSmBufAllocCount));
54994 + atomic_read_unchecked(&totBufAllocCount),
54995 + atomic_read_unchecked(&totSmBufAllocCount));
54996 #endif /* CONFIG_CIFS_STATS2 */
54997
54998 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
54999 @@ -362,7 +362,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
55000 if (tcon->need_reconnect)
55001 seq_puts(m, "\tDISCONNECTED ");
55002 seq_printf(m, "\nSMBs: %d",
55003 - atomic_read(&tcon->num_smbs_sent));
55004 + atomic_read_unchecked(&tcon->num_smbs_sent));
55005 if (server->ops->print_stats)
55006 server->ops->print_stats(m, tcon);
55007 }
55008 diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
55009 index 77fc5e1..e3d13e6 100644
55010 --- a/fs/cifs/cifsfs.c
55011 +++ b/fs/cifs/cifsfs.c
55012 @@ -1056,7 +1056,7 @@ cifs_init_request_bufs(void)
55013 */
55014 cifs_req_cachep = kmem_cache_create("cifs_request",
55015 CIFSMaxBufSize + max_hdr_size, 0,
55016 - SLAB_HWCACHE_ALIGN, NULL);
55017 + SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
55018 if (cifs_req_cachep == NULL)
55019 return -ENOMEM;
55020
55021 @@ -1083,7 +1083,7 @@ cifs_init_request_bufs(void)
55022 efficient to alloc 1 per page off the slab compared to 17K (5page)
55023 alloc of large cifs buffers even when page debugging is on */
55024 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
55025 - MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
55026 + MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
55027 NULL);
55028 if (cifs_sm_req_cachep == NULL) {
55029 mempool_destroy(cifs_req_poolp);
55030 @@ -1168,8 +1168,8 @@ init_cifs(void)
55031 atomic_set(&bufAllocCount, 0);
55032 atomic_set(&smBufAllocCount, 0);
55033 #ifdef CONFIG_CIFS_STATS2
55034 - atomic_set(&totBufAllocCount, 0);
55035 - atomic_set(&totSmBufAllocCount, 0);
55036 + atomic_set_unchecked(&totBufAllocCount, 0);
55037 + atomic_set_unchecked(&totSmBufAllocCount, 0);
55038 #endif /* CONFIG_CIFS_STATS2 */
55039
55040 atomic_set(&midCount, 0);
55041 diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
55042 index c8e03f8..75362f6 100644
55043 --- a/fs/cifs/cifsglob.h
55044 +++ b/fs/cifs/cifsglob.h
55045 @@ -758,35 +758,35 @@ struct cifs_tcon {
55046 __u16 Flags; /* optional support bits */
55047 enum statusEnum tidStatus;
55048 #ifdef CONFIG_CIFS_STATS
55049 - atomic_t num_smbs_sent;
55050 + atomic_unchecked_t num_smbs_sent;
55051 union {
55052 struct {
55053 - atomic_t num_writes;
55054 - atomic_t num_reads;
55055 - atomic_t num_flushes;
55056 - atomic_t num_oplock_brks;
55057 - atomic_t num_opens;
55058 - atomic_t num_closes;
55059 - atomic_t num_deletes;
55060 - atomic_t num_mkdirs;
55061 - atomic_t num_posixopens;
55062 - atomic_t num_posixmkdirs;
55063 - atomic_t num_rmdirs;
55064 - atomic_t num_renames;
55065 - atomic_t num_t2renames;
55066 - atomic_t num_ffirst;
55067 - atomic_t num_fnext;
55068 - atomic_t num_fclose;
55069 - atomic_t num_hardlinks;
55070 - atomic_t num_symlinks;
55071 - atomic_t num_locks;
55072 - atomic_t num_acl_get;
55073 - atomic_t num_acl_set;
55074 + atomic_unchecked_t num_writes;
55075 + atomic_unchecked_t num_reads;
55076 + atomic_unchecked_t num_flushes;
55077 + atomic_unchecked_t num_oplock_brks;
55078 + atomic_unchecked_t num_opens;
55079 + atomic_unchecked_t num_closes;
55080 + atomic_unchecked_t num_deletes;
55081 + atomic_unchecked_t num_mkdirs;
55082 + atomic_unchecked_t num_posixopens;
55083 + atomic_unchecked_t num_posixmkdirs;
55084 + atomic_unchecked_t num_rmdirs;
55085 + atomic_unchecked_t num_renames;
55086 + atomic_unchecked_t num_t2renames;
55087 + atomic_unchecked_t num_ffirst;
55088 + atomic_unchecked_t num_fnext;
55089 + atomic_unchecked_t num_fclose;
55090 + atomic_unchecked_t num_hardlinks;
55091 + atomic_unchecked_t num_symlinks;
55092 + atomic_unchecked_t num_locks;
55093 + atomic_unchecked_t num_acl_get;
55094 + atomic_unchecked_t num_acl_set;
55095 } cifs_stats;
55096 #ifdef CONFIG_CIFS_SMB2
55097 struct {
55098 - atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
55099 - atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
55100 + atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
55101 + atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
55102 } smb2_stats;
55103 #endif /* CONFIG_CIFS_SMB2 */
55104 } stats;
55105 @@ -1111,7 +1111,7 @@ convert_delimiter(char *path, char delim)
55106 }
55107
55108 #ifdef CONFIG_CIFS_STATS
55109 -#define cifs_stats_inc atomic_inc
55110 +#define cifs_stats_inc atomic_inc_unchecked
55111
55112 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
55113 unsigned int bytes)
55114 @@ -1477,8 +1477,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
55115 /* Various Debug counters */
55116 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
55117 #ifdef CONFIG_CIFS_STATS2
55118 -GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
55119 -GLOBAL_EXTERN atomic_t totSmBufAllocCount;
55120 +GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
55121 +GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
55122 #endif
55123 GLOBAL_EXTERN atomic_t smBufAllocCount;
55124 GLOBAL_EXTERN atomic_t midCount;
55125 diff --git a/fs/cifs/file.c b/fs/cifs/file.c
55126 index 7ddddf2..2e12dbc 100644
55127 --- a/fs/cifs/file.c
55128 +++ b/fs/cifs/file.c
55129 @@ -1900,10 +1900,14 @@ static int cifs_writepages(struct address_space *mapping,
55130 index = mapping->writeback_index; /* Start from prev offset */
55131 end = -1;
55132 } else {
55133 - index = wbc->range_start >> PAGE_CACHE_SHIFT;
55134 - end = wbc->range_end >> PAGE_CACHE_SHIFT;
55135 - if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
55136 + if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) {
55137 range_whole = true;
55138 + index = 0;
55139 + end = ULONG_MAX;
55140 + } else {
55141 + index = wbc->range_start >> PAGE_CACHE_SHIFT;
55142 + end = wbc->range_end >> PAGE_CACHE_SHIFT;
55143 + }
55144 scanned = true;
55145 }
55146 retry:
55147 diff --git a/fs/cifs/link.c b/fs/cifs/link.c
55148 index 7e36ceb..109252f 100644
55149 --- a/fs/cifs/link.c
55150 +++ b/fs/cifs/link.c
55151 @@ -624,7 +624,7 @@ symlink_exit:
55152
55153 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
55154 {
55155 - char *p = nd_get_link(nd);
55156 + const char *p = nd_get_link(nd);
55157 if (!IS_ERR(p))
55158 kfree(p);
55159 }
55160 diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
55161 index 138a011..cf9e13a 100644
55162 --- a/fs/cifs/misc.c
55163 +++ b/fs/cifs/misc.c
55164 @@ -170,7 +170,7 @@ cifs_buf_get(void)
55165 memset(ret_buf, 0, buf_size + 3);
55166 atomic_inc(&bufAllocCount);
55167 #ifdef CONFIG_CIFS_STATS2
55168 - atomic_inc(&totBufAllocCount);
55169 + atomic_inc_unchecked(&totBufAllocCount);
55170 #endif /* CONFIG_CIFS_STATS2 */
55171 }
55172
55173 @@ -205,7 +205,7 @@ cifs_small_buf_get(void)
55174 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
55175 atomic_inc(&smBufAllocCount);
55176 #ifdef CONFIG_CIFS_STATS2
55177 - atomic_inc(&totSmBufAllocCount);
55178 + atomic_inc_unchecked(&totSmBufAllocCount);
55179 #endif /* CONFIG_CIFS_STATS2 */
55180
55181 }
55182 diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
55183 index e50554b..c011413 100644
55184 --- a/fs/cifs/smb1ops.c
55185 +++ b/fs/cifs/smb1ops.c
55186 @@ -609,27 +609,27 @@ static void
55187 cifs_clear_stats(struct cifs_tcon *tcon)
55188 {
55189 #ifdef CONFIG_CIFS_STATS
55190 - atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
55191 - atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
55192 - atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
55193 - atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
55194 - atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
55195 - atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
55196 - atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
55197 - atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
55198 - atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
55199 - atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
55200 - atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
55201 - atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
55202 - atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
55203 - atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
55204 - atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
55205 - atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
55206 - atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
55207 - atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
55208 - atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
55209 - atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
55210 - atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
55211 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
55212 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
55213 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
55214 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
55215 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
55216 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
55217 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
55218 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
55219 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
55220 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
55221 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
55222 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
55223 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
55224 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
55225 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
55226 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
55227 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
55228 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
55229 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
55230 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
55231 + atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
55232 #endif
55233 }
55234
55235 @@ -638,36 +638,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
55236 {
55237 #ifdef CONFIG_CIFS_STATS
55238 seq_printf(m, " Oplocks breaks: %d",
55239 - atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
55240 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
55241 seq_printf(m, "\nReads: %d Bytes: %llu",
55242 - atomic_read(&tcon->stats.cifs_stats.num_reads),
55243 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
55244 (long long)(tcon->bytes_read));
55245 seq_printf(m, "\nWrites: %d Bytes: %llu",
55246 - atomic_read(&tcon->stats.cifs_stats.num_writes),
55247 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
55248 (long long)(tcon->bytes_written));
55249 seq_printf(m, "\nFlushes: %d",
55250 - atomic_read(&tcon->stats.cifs_stats.num_flushes));
55251 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
55252 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
55253 - atomic_read(&tcon->stats.cifs_stats.num_locks),
55254 - atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
55255 - atomic_read(&tcon->stats.cifs_stats.num_symlinks));
55256 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
55257 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
55258 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
55259 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
55260 - atomic_read(&tcon->stats.cifs_stats.num_opens),
55261 - atomic_read(&tcon->stats.cifs_stats.num_closes),
55262 - atomic_read(&tcon->stats.cifs_stats.num_deletes));
55263 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
55264 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
55265 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
55266 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
55267 - atomic_read(&tcon->stats.cifs_stats.num_posixopens),
55268 - atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
55269 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
55270 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
55271 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
55272 - atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
55273 - atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
55274 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
55275 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
55276 seq_printf(m, "\nRenames: %d T2 Renames %d",
55277 - atomic_read(&tcon->stats.cifs_stats.num_renames),
55278 - atomic_read(&tcon->stats.cifs_stats.num_t2renames));
55279 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
55280 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
55281 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
55282 - atomic_read(&tcon->stats.cifs_stats.num_ffirst),
55283 - atomic_read(&tcon->stats.cifs_stats.num_fnext),
55284 - atomic_read(&tcon->stats.cifs_stats.num_fclose));
55285 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
55286 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
55287 + atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
55288 #endif
55289 }
55290
55291 diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
55292 index 861b332..5506392 100644
55293 --- a/fs/cifs/smb2ops.c
55294 +++ b/fs/cifs/smb2ops.c
55295 @@ -282,8 +282,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
55296 #ifdef CONFIG_CIFS_STATS
55297 int i;
55298 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
55299 - atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
55300 - atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
55301 + atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
55302 + atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
55303 }
55304 #endif
55305 }
55306 @@ -311,65 +311,65 @@ static void
55307 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
55308 {
55309 #ifdef CONFIG_CIFS_STATS
55310 - atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
55311 - atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
55312 + atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
55313 + atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
55314 seq_printf(m, "\nNegotiates: %d sent %d failed",
55315 - atomic_read(&sent[SMB2_NEGOTIATE_HE]),
55316 - atomic_read(&failed[SMB2_NEGOTIATE_HE]));
55317 + atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
55318 + atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
55319 seq_printf(m, "\nSessionSetups: %d sent %d failed",
55320 - atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
55321 - atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
55322 + atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
55323 + atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
55324 seq_printf(m, "\nLogoffs: %d sent %d failed",
55325 - atomic_read(&sent[SMB2_LOGOFF_HE]),
55326 - atomic_read(&failed[SMB2_LOGOFF_HE]));
55327 + atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
55328 + atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
55329 seq_printf(m, "\nTreeConnects: %d sent %d failed",
55330 - atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
55331 - atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
55332 + atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
55333 + atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
55334 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
55335 - atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
55336 - atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
55337 + atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
55338 + atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
55339 seq_printf(m, "\nCreates: %d sent %d failed",
55340 - atomic_read(&sent[SMB2_CREATE_HE]),
55341 - atomic_read(&failed[SMB2_CREATE_HE]));
55342 + atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
55343 + atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
55344 seq_printf(m, "\nCloses: %d sent %d failed",
55345 - atomic_read(&sent[SMB2_CLOSE_HE]),
55346 - atomic_read(&failed[SMB2_CLOSE_HE]));
55347 + atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
55348 + atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
55349 seq_printf(m, "\nFlushes: %d sent %d failed",
55350 - atomic_read(&sent[SMB2_FLUSH_HE]),
55351 - atomic_read(&failed[SMB2_FLUSH_HE]));
55352 + atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
55353 + atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
55354 seq_printf(m, "\nReads: %d sent %d failed",
55355 - atomic_read(&sent[SMB2_READ_HE]),
55356 - atomic_read(&failed[SMB2_READ_HE]));
55357 + atomic_read_unchecked(&sent[SMB2_READ_HE]),
55358 + atomic_read_unchecked(&failed[SMB2_READ_HE]));
55359 seq_printf(m, "\nWrites: %d sent %d failed",
55360 - atomic_read(&sent[SMB2_WRITE_HE]),
55361 - atomic_read(&failed[SMB2_WRITE_HE]));
55362 + atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
55363 + atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
55364 seq_printf(m, "\nLocks: %d sent %d failed",
55365 - atomic_read(&sent[SMB2_LOCK_HE]),
55366 - atomic_read(&failed[SMB2_LOCK_HE]));
55367 + atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
55368 + atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
55369 seq_printf(m, "\nIOCTLs: %d sent %d failed",
55370 - atomic_read(&sent[SMB2_IOCTL_HE]),
55371 - atomic_read(&failed[SMB2_IOCTL_HE]));
55372 + atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
55373 + atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
55374 seq_printf(m, "\nCancels: %d sent %d failed",
55375 - atomic_read(&sent[SMB2_CANCEL_HE]),
55376 - atomic_read(&failed[SMB2_CANCEL_HE]));
55377 + atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
55378 + atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
55379 seq_printf(m, "\nEchos: %d sent %d failed",
55380 - atomic_read(&sent[SMB2_ECHO_HE]),
55381 - atomic_read(&failed[SMB2_ECHO_HE]));
55382 + atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
55383 + atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
55384 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
55385 - atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
55386 - atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
55387 + atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
55388 + atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
55389 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
55390 - atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
55391 - atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
55392 + atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
55393 + atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
55394 seq_printf(m, "\nQueryInfos: %d sent %d failed",
55395 - atomic_read(&sent[SMB2_QUERY_INFO_HE]),
55396 - atomic_read(&failed[SMB2_QUERY_INFO_HE]));
55397 + atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
55398 + atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
55399 seq_printf(m, "\nSetInfos: %d sent %d failed",
55400 - atomic_read(&sent[SMB2_SET_INFO_HE]),
55401 - atomic_read(&failed[SMB2_SET_INFO_HE]));
55402 + atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
55403 + atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
55404 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
55405 - atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
55406 - atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
55407 + atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
55408 + atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
55409 #endif
55410 }
55411
55412 diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
55413 index edccb52..16bc6db 100644
55414 --- a/fs/cifs/smb2pdu.c
55415 +++ b/fs/cifs/smb2pdu.c
55416 @@ -1957,8 +1957,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
55417 default:
55418 cifs_dbg(VFS, "info level %u isn't supported\n",
55419 srch_inf->info_level);
55420 - rc = -EINVAL;
55421 - goto qdir_exit;
55422 + return -EINVAL;
55423 }
55424
55425 req->FileIndex = cpu_to_le32(index);
55426 diff --git a/fs/coda/cache.c b/fs/coda/cache.c
55427 index 1da168c..8bc7ff6 100644
55428 --- a/fs/coda/cache.c
55429 +++ b/fs/coda/cache.c
55430 @@ -24,7 +24,7 @@
55431 #include "coda_linux.h"
55432 #include "coda_cache.h"
55433
55434 -static atomic_t permission_epoch = ATOMIC_INIT(0);
55435 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
55436
55437 /* replace or extend an acl cache hit */
55438 void coda_cache_enter(struct inode *inode, int mask)
55439 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
55440 struct coda_inode_info *cii = ITOC(inode);
55441
55442 spin_lock(&cii->c_lock);
55443 - cii->c_cached_epoch = atomic_read(&permission_epoch);
55444 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
55445 if (!uid_eq(cii->c_uid, current_fsuid())) {
55446 cii->c_uid = current_fsuid();
55447 cii->c_cached_perm = mask;
55448 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
55449 {
55450 struct coda_inode_info *cii = ITOC(inode);
55451 spin_lock(&cii->c_lock);
55452 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
55453 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
55454 spin_unlock(&cii->c_lock);
55455 }
55456
55457 /* remove all acl caches */
55458 void coda_cache_clear_all(struct super_block *sb)
55459 {
55460 - atomic_inc(&permission_epoch);
55461 + atomic_inc_unchecked(&permission_epoch);
55462 }
55463
55464
55465 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
55466 spin_lock(&cii->c_lock);
55467 hit = (mask & cii->c_cached_perm) == mask &&
55468 uid_eq(cii->c_uid, current_fsuid()) &&
55469 - cii->c_cached_epoch == atomic_read(&permission_epoch);
55470 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
55471 spin_unlock(&cii->c_lock);
55472
55473 return hit;
55474 diff --git a/fs/compat.c b/fs/compat.c
55475 index 6af20de..fec3fbb 100644
55476 --- a/fs/compat.c
55477 +++ b/fs/compat.c
55478 @@ -54,7 +54,7 @@
55479 #include <asm/ioctls.h>
55480 #include "internal.h"
55481
55482 -int compat_log = 1;
55483 +int compat_log = 0;
55484
55485 int compat_printk(const char *fmt, ...)
55486 {
55487 @@ -488,7 +488,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
55488
55489 set_fs(KERNEL_DS);
55490 /* The __user pointer cast is valid because of the set_fs() */
55491 - ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
55492 + ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
55493 set_fs(oldfs);
55494 /* truncating is ok because it's a user address */
55495 if (!ret)
55496 @@ -546,7 +546,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
55497 goto out;
55498
55499 ret = -EINVAL;
55500 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
55501 + if (nr_segs > UIO_MAXIOV)
55502 goto out;
55503 if (nr_segs > fast_segs) {
55504 ret = -ENOMEM;
55505 @@ -834,6 +834,7 @@ struct compat_old_linux_dirent {
55506 struct compat_readdir_callback {
55507 struct dir_context ctx;
55508 struct compat_old_linux_dirent __user *dirent;
55509 + struct file * file;
55510 int result;
55511 };
55512
55513 @@ -851,6 +852,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
55514 buf->result = -EOVERFLOW;
55515 return -EOVERFLOW;
55516 }
55517 +
55518 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55519 + return 0;
55520 +
55521 buf->result++;
55522 dirent = buf->dirent;
55523 if (!access_ok(VERIFY_WRITE, dirent,
55524 @@ -882,6 +887,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
55525 if (!f.file)
55526 return -EBADF;
55527
55528 + buf.file = f.file;
55529 error = iterate_dir(f.file, &buf.ctx);
55530 if (buf.result)
55531 error = buf.result;
55532 @@ -901,6 +907,7 @@ struct compat_getdents_callback {
55533 struct dir_context ctx;
55534 struct compat_linux_dirent __user *current_dir;
55535 struct compat_linux_dirent __user *previous;
55536 + struct file * file;
55537 int count;
55538 int error;
55539 };
55540 @@ -922,6 +929,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
55541 buf->error = -EOVERFLOW;
55542 return -EOVERFLOW;
55543 }
55544 +
55545 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55546 + return 0;
55547 +
55548 dirent = buf->previous;
55549 if (dirent) {
55550 if (__put_user(offset, &dirent->d_off))
55551 @@ -967,6 +978,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
55552 if (!f.file)
55553 return -EBADF;
55554
55555 + buf.file = f.file;
55556 error = iterate_dir(f.file, &buf.ctx);
55557 if (error >= 0)
55558 error = buf.error;
55559 @@ -987,6 +999,7 @@ struct compat_getdents_callback64 {
55560 struct dir_context ctx;
55561 struct linux_dirent64 __user *current_dir;
55562 struct linux_dirent64 __user *previous;
55563 + struct file * file;
55564 int count;
55565 int error;
55566 };
55567 @@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
55568 buf->error = -EINVAL; /* only used if we fail.. */
55569 if (reclen > buf->count)
55570 return -EINVAL;
55571 +
55572 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55573 + return 0;
55574 +
55575 dirent = buf->previous;
55576
55577 if (dirent) {
55578 @@ -1052,6 +1069,7 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
55579 if (!f.file)
55580 return -EBADF;
55581
55582 + buf.file = f.file;
55583 error = iterate_dir(f.file, &buf.ctx);
55584 if (error >= 0)
55585 error = buf.error;
55586 diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
55587 index a81147e..20bf2b5 100644
55588 --- a/fs/compat_binfmt_elf.c
55589 +++ b/fs/compat_binfmt_elf.c
55590 @@ -30,11 +30,13 @@
55591 #undef elf_phdr
55592 #undef elf_shdr
55593 #undef elf_note
55594 +#undef elf_dyn
55595 #undef elf_addr_t
55596 #define elfhdr elf32_hdr
55597 #define elf_phdr elf32_phdr
55598 #define elf_shdr elf32_shdr
55599 #define elf_note elf32_note
55600 +#define elf_dyn Elf32_Dyn
55601 #define elf_addr_t Elf32_Addr
55602
55603 /*
55604 diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
55605 index 5d19acf..9ab093b 100644
55606 --- a/fs/compat_ioctl.c
55607 +++ b/fs/compat_ioctl.c
55608 @@ -621,7 +621,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
55609 return -EFAULT;
55610 if (__get_user(udata, &ss32->iomem_base))
55611 return -EFAULT;
55612 - ss.iomem_base = compat_ptr(udata);
55613 + ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
55614 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
55615 __get_user(ss.port_high, &ss32->port_high))
55616 return -EFAULT;
55617 @@ -702,8 +702,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
55618 for (i = 0; i < nmsgs; i++) {
55619 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
55620 return -EFAULT;
55621 - if (get_user(datap, &umsgs[i].buf) ||
55622 - put_user(compat_ptr(datap), &tmsgs[i].buf))
55623 + if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
55624 + put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
55625 return -EFAULT;
55626 }
55627 return sys_ioctl(fd, cmd, (unsigned long)tdata);
55628 @@ -796,7 +796,7 @@ static int compat_ioctl_preallocate(struct file *file,
55629 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
55630 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
55631 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
55632 - copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
55633 + copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
55634 return -EFAULT;
55635
55636 return ioctl_preallocate(file, p);
55637 @@ -1616,8 +1616,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
55638 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
55639 {
55640 unsigned int a, b;
55641 - a = *(unsigned int *)p;
55642 - b = *(unsigned int *)q;
55643 + a = *(const unsigned int *)p;
55644 + b = *(const unsigned int *)q;
55645 if (a > b)
55646 return 1;
55647 if (a < b)
55648 diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
55649 index 511d415..319d0e5 100644
55650 --- a/fs/configfs/dir.c
55651 +++ b/fs/configfs/dir.c
55652 @@ -1558,7 +1558,8 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
55653 }
55654 for (p = q->next; p != &parent_sd->s_children; p = p->next) {
55655 struct configfs_dirent *next;
55656 - const char *name;
55657 + const unsigned char * name;
55658 + char d_name[sizeof(next->s_dentry->d_iname)];
55659 int len;
55660 struct inode *inode = NULL;
55661
55662 @@ -1567,7 +1568,12 @@ static int configfs_readdir(struct file *file, struct dir_context *ctx)
55663 continue;
55664
55665 name = configfs_get_name(next);
55666 - len = strlen(name);
55667 + if (next->s_dentry && name == next->s_dentry->d_iname) {
55668 + len = next->s_dentry->d_name.len;
55669 + memcpy(d_name, name, len);
55670 + name = d_name;
55671 + } else
55672 + len = strlen(name);
55673
55674 /*
55675 * We'll have a dentry and an inode for
55676 diff --git a/fs/coredump.c b/fs/coredump.c
55677 index 9bdeca1..2a9b08d 100644
55678 --- a/fs/coredump.c
55679 +++ b/fs/coredump.c
55680 @@ -438,8 +438,8 @@ static void wait_for_dump_helpers(struct file *file)
55681 struct pipe_inode_info *pipe = file->private_data;
55682
55683 pipe_lock(pipe);
55684 - pipe->readers++;
55685 - pipe->writers--;
55686 + atomic_inc(&pipe->readers);
55687 + atomic_dec(&pipe->writers);
55688 wake_up_interruptible_sync(&pipe->wait);
55689 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
55690 pipe_unlock(pipe);
55691 @@ -448,11 +448,11 @@ static void wait_for_dump_helpers(struct file *file)
55692 * We actually want wait_event_freezable() but then we need
55693 * to clear TIF_SIGPENDING and improve dump_interrupted().
55694 */
55695 - wait_event_interruptible(pipe->wait, pipe->readers == 1);
55696 + wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
55697
55698 pipe_lock(pipe);
55699 - pipe->readers--;
55700 - pipe->writers++;
55701 + atomic_dec(&pipe->readers);
55702 + atomic_inc(&pipe->writers);
55703 pipe_unlock(pipe);
55704 }
55705
55706 @@ -499,7 +499,9 @@ void do_coredump(siginfo_t *siginfo)
55707 struct files_struct *displaced;
55708 bool need_nonrelative = false;
55709 bool core_dumped = false;
55710 - static atomic_t core_dump_count = ATOMIC_INIT(0);
55711 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
55712 + long signr = siginfo->si_signo;
55713 + int dumpable;
55714 struct coredump_params cprm = {
55715 .siginfo = siginfo,
55716 .regs = signal_pt_regs(),
55717 @@ -512,12 +514,17 @@ void do_coredump(siginfo_t *siginfo)
55718 .mm_flags = mm->flags,
55719 };
55720
55721 - audit_core_dumps(siginfo->si_signo);
55722 + audit_core_dumps(signr);
55723 +
55724 + dumpable = __get_dumpable(cprm.mm_flags);
55725 +
55726 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
55727 + gr_handle_brute_attach(dumpable);
55728
55729 binfmt = mm->binfmt;
55730 if (!binfmt || !binfmt->core_dump)
55731 goto fail;
55732 - if (!__get_dumpable(cprm.mm_flags))
55733 + if (!dumpable)
55734 goto fail;
55735
55736 cred = prepare_creds();
55737 @@ -536,7 +543,7 @@ void do_coredump(siginfo_t *siginfo)
55738 need_nonrelative = true;
55739 }
55740
55741 - retval = coredump_wait(siginfo->si_signo, &core_state);
55742 + retval = coredump_wait(signr, &core_state);
55743 if (retval < 0)
55744 goto fail_creds;
55745
55746 @@ -579,7 +586,7 @@ void do_coredump(siginfo_t *siginfo)
55747 }
55748 cprm.limit = RLIM_INFINITY;
55749
55750 - dump_count = atomic_inc_return(&core_dump_count);
55751 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
55752 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
55753 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
55754 task_tgid_vnr(current), current->comm);
55755 @@ -611,6 +618,8 @@ void do_coredump(siginfo_t *siginfo)
55756 } else {
55757 struct inode *inode;
55758
55759 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
55760 +
55761 if (cprm.limit < binfmt->min_coredump)
55762 goto fail_unlock;
55763
55764 @@ -669,7 +678,7 @@ close_fail:
55765 filp_close(cprm.file, NULL);
55766 fail_dropcount:
55767 if (ispipe)
55768 - atomic_dec(&core_dump_count);
55769 + atomic_dec_unchecked(&core_dump_count);
55770 fail_unlock:
55771 kfree(cn.corename);
55772 coredump_finish(mm, core_dumped);
55773 @@ -689,7 +698,7 @@ int dump_write(struct file *file, const void *addr, int nr)
55774 {
55775 return !dump_interrupted() &&
55776 access_ok(VERIFY_READ, addr, nr) &&
55777 - file->f_op->write(file, addr, nr, &file->f_pos) == nr;
55778 + file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
55779 }
55780 EXPORT_SYMBOL(dump_write);
55781
55782 diff --git a/fs/dcache.c b/fs/dcache.c
55783 index 89f9671..d2dce57 100644
55784 --- a/fs/dcache.c
55785 +++ b/fs/dcache.c
55786 @@ -1570,7 +1570,7 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
55787 */
55788 dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
55789 if (name->len > DNAME_INLINE_LEN-1) {
55790 - dname = kmalloc(name->len + 1, GFP_KERNEL);
55791 + dname = kmalloc(round_up(name->len + 1, sizeof(unsigned long)), GFP_KERNEL);
55792 if (!dname) {
55793 kmem_cache_free(dentry_cache, dentry);
55794 return NULL;
55795 @@ -2893,6 +2893,7 @@ static int prepend_path(const struct path *path,
55796 restart:
55797 bptr = *buffer;
55798 blen = *buflen;
55799 + error = 0;
55800 dentry = path->dentry;
55801 vfsmnt = path->mnt;
55802 mnt = real_mount(vfsmnt);
55803 @@ -3432,7 +3433,8 @@ void __init vfs_caches_init(unsigned long mempages)
55804 mempages -= reserve;
55805
55806 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
55807 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
55808 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
55809 + SLAB_NO_SANITIZE, NULL);
55810
55811 dcache_init();
55812 inode_init();
55813 diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
55814 index c7c83ff..bda9461 100644
55815 --- a/fs/debugfs/inode.c
55816 +++ b/fs/debugfs/inode.c
55817 @@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
55818 */
55819 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
55820 {
55821 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
55822 + return __create_file(name, S_IFDIR | S_IRWXU,
55823 +#else
55824 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
55825 +#endif
55826 parent, NULL, NULL);
55827 }
55828 EXPORT_SYMBOL_GPL(debugfs_create_dir);
55829 diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
55830 index 67e9b63..a9adb68 100644
55831 --- a/fs/ecryptfs/inode.c
55832 +++ b/fs/ecryptfs/inode.c
55833 @@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
55834 old_fs = get_fs();
55835 set_fs(get_ds());
55836 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
55837 - (char __user *)lower_buf,
55838 + (char __force_user *)lower_buf,
55839 PATH_MAX);
55840 set_fs(old_fs);
55841 if (rc < 0)
55842 @@ -706,7 +706,7 @@ out:
55843 static void
55844 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
55845 {
55846 - char *buf = nd_get_link(nd);
55847 + const char *buf = nd_get_link(nd);
55848 if (!IS_ERR(buf)) {
55849 /* Free the char* */
55850 kfree(buf);
55851 diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
55852 index e4141f2..d8263e8 100644
55853 --- a/fs/ecryptfs/miscdev.c
55854 +++ b/fs/ecryptfs/miscdev.c
55855 @@ -304,7 +304,7 @@ check_list:
55856 goto out_unlock_msg_ctx;
55857 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
55858 if (msg_ctx->msg) {
55859 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
55860 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
55861 goto out_unlock_msg_ctx;
55862 i += packet_length_size;
55863 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
55864 diff --git a/fs/exec.c b/fs/exec.c
55865 index bb8afc1..2f5087e 100644
55866 --- a/fs/exec.c
55867 +++ b/fs/exec.c
55868 @@ -55,8 +55,20 @@
55869 #include <linux/pipe_fs_i.h>
55870 #include <linux/oom.h>
55871 #include <linux/compat.h>
55872 +#include <linux/random.h>
55873 +#include <linux/seq_file.h>
55874 +#include <linux/coredump.h>
55875 +#include <linux/mman.h>
55876 +
55877 +#ifdef CONFIG_PAX_REFCOUNT
55878 +#include <linux/kallsyms.h>
55879 +#include <linux/kdebug.h>
55880 +#endif
55881 +
55882 +#include <trace/events/fs.h>
55883
55884 #include <asm/uaccess.h>
55885 +#include <asm/sections.h>
55886 #include <asm/mmu_context.h>
55887 #include <asm/tlb.h>
55888
55889 @@ -66,19 +78,34 @@
55890
55891 #include <trace/events/sched.h>
55892
55893 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
55894 +void __weak pax_set_initial_flags(struct linux_binprm *bprm)
55895 +{
55896 + pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
55897 +}
55898 +#endif
55899 +
55900 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
55901 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
55902 +EXPORT_SYMBOL(pax_set_initial_flags_func);
55903 +#endif
55904 +
55905 int suid_dumpable = 0;
55906
55907 static LIST_HEAD(formats);
55908 static DEFINE_RWLOCK(binfmt_lock);
55909
55910 +extern int gr_process_kernel_exec_ban(void);
55911 +extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
55912 +
55913 void __register_binfmt(struct linux_binfmt * fmt, int insert)
55914 {
55915 BUG_ON(!fmt);
55916 if (WARN_ON(!fmt->load_binary))
55917 return;
55918 write_lock(&binfmt_lock);
55919 - insert ? list_add(&fmt->lh, &formats) :
55920 - list_add_tail(&fmt->lh, &formats);
55921 + insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
55922 + pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
55923 write_unlock(&binfmt_lock);
55924 }
55925
55926 @@ -87,7 +114,7 @@ EXPORT_SYMBOL(__register_binfmt);
55927 void unregister_binfmt(struct linux_binfmt * fmt)
55928 {
55929 write_lock(&binfmt_lock);
55930 - list_del(&fmt->lh);
55931 + pax_list_del((struct list_head *)&fmt->lh);
55932 write_unlock(&binfmt_lock);
55933 }
55934
55935 @@ -183,18 +210,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
55936 int write)
55937 {
55938 struct page *page;
55939 - int ret;
55940
55941 -#ifdef CONFIG_STACK_GROWSUP
55942 - if (write) {
55943 - ret = expand_downwards(bprm->vma, pos);
55944 - if (ret < 0)
55945 - return NULL;
55946 - }
55947 -#endif
55948 - ret = get_user_pages(current, bprm->mm, pos,
55949 - 1, write, 1, &page, NULL);
55950 - if (ret <= 0)
55951 + if (0 > expand_downwards(bprm->vma, pos))
55952 + return NULL;
55953 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
55954 return NULL;
55955
55956 if (write) {
55957 @@ -210,6 +229,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
55958 if (size <= ARG_MAX)
55959 return page;
55960
55961 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55962 + // only allow 512KB for argv+env on suid/sgid binaries
55963 + // to prevent easy ASLR exhaustion
55964 + if (((!uid_eq(bprm->cred->euid, current_euid())) ||
55965 + (!gid_eq(bprm->cred->egid, current_egid()))) &&
55966 + (size > (512 * 1024))) {
55967 + put_page(page);
55968 + return NULL;
55969 + }
55970 +#endif
55971 +
55972 /*
55973 * Limit to 1/4-th the stack size for the argv+env strings.
55974 * This ensures that:
55975 @@ -269,6 +299,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
55976 vma->vm_end = STACK_TOP_MAX;
55977 vma->vm_start = vma->vm_end - PAGE_SIZE;
55978 vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
55979 +
55980 +#ifdef CONFIG_PAX_SEGMEXEC
55981 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
55982 +#endif
55983 +
55984 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
55985 INIT_LIST_HEAD(&vma->anon_vma_chain);
55986
55987 @@ -279,6 +314,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
55988 mm->stack_vm = mm->total_vm = 1;
55989 up_write(&mm->mmap_sem);
55990 bprm->p = vma->vm_end - sizeof(void *);
55991 +
55992 +#ifdef CONFIG_PAX_RANDUSTACK
55993 + if (randomize_va_space)
55994 + bprm->p ^= prandom_u32() & ~PAGE_MASK;
55995 +#endif
55996 +
55997 return 0;
55998 err:
55999 up_write(&mm->mmap_sem);
56000 @@ -399,7 +440,7 @@ struct user_arg_ptr {
56001 } ptr;
56002 };
56003
56004 -static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
56005 +const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
56006 {
56007 const char __user *native;
56008
56009 @@ -408,14 +449,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
56010 compat_uptr_t compat;
56011
56012 if (get_user(compat, argv.ptr.compat + nr))
56013 - return ERR_PTR(-EFAULT);
56014 + return (const char __force_user *)ERR_PTR(-EFAULT);
56015
56016 return compat_ptr(compat);
56017 }
56018 #endif
56019
56020 if (get_user(native, argv.ptr.native + nr))
56021 - return ERR_PTR(-EFAULT);
56022 + return (const char __force_user *)ERR_PTR(-EFAULT);
56023
56024 return native;
56025 }
56026 @@ -434,7 +475,7 @@ static int count(struct user_arg_ptr argv, int max)
56027 if (!p)
56028 break;
56029
56030 - if (IS_ERR(p))
56031 + if (IS_ERR((const char __force_kernel *)p))
56032 return -EFAULT;
56033
56034 if (i >= max)
56035 @@ -469,7 +510,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
56036
56037 ret = -EFAULT;
56038 str = get_user_arg_ptr(argv, argc);
56039 - if (IS_ERR(str))
56040 + if (IS_ERR((const char __force_kernel *)str))
56041 goto out;
56042
56043 len = strnlen_user(str, MAX_ARG_STRLEN);
56044 @@ -551,7 +592,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
56045 int r;
56046 mm_segment_t oldfs = get_fs();
56047 struct user_arg_ptr argv = {
56048 - .ptr.native = (const char __user *const __user *)__argv,
56049 + .ptr.native = (const char __force_user * const __force_user *)__argv,
56050 };
56051
56052 set_fs(KERNEL_DS);
56053 @@ -586,7 +627,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
56054 unsigned long new_end = old_end - shift;
56055 struct mmu_gather tlb;
56056
56057 - BUG_ON(new_start > new_end);
56058 + if (new_start >= new_end || new_start < mmap_min_addr)
56059 + return -ENOMEM;
56060
56061 /*
56062 * ensure there are no vmas between where we want to go
56063 @@ -595,6 +637,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
56064 if (vma != find_vma(mm, new_start))
56065 return -EFAULT;
56066
56067 +#ifdef CONFIG_PAX_SEGMEXEC
56068 + BUG_ON(pax_find_mirror_vma(vma));
56069 +#endif
56070 +
56071 /*
56072 * cover the whole range: [new_start, old_end)
56073 */
56074 @@ -675,10 +721,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
56075 stack_top = arch_align_stack(stack_top);
56076 stack_top = PAGE_ALIGN(stack_top);
56077
56078 - if (unlikely(stack_top < mmap_min_addr) ||
56079 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
56080 - return -ENOMEM;
56081 -
56082 stack_shift = vma->vm_end - stack_top;
56083
56084 bprm->p -= stack_shift;
56085 @@ -690,8 +732,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
56086 bprm->exec -= stack_shift;
56087
56088 down_write(&mm->mmap_sem);
56089 +
56090 + /* Move stack pages down in memory. */
56091 + if (stack_shift) {
56092 + ret = shift_arg_pages(vma, stack_shift);
56093 + if (ret)
56094 + goto out_unlock;
56095 + }
56096 +
56097 vm_flags = VM_STACK_FLAGS;
56098
56099 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
56100 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
56101 + vm_flags &= ~VM_EXEC;
56102 +
56103 +#ifdef CONFIG_PAX_MPROTECT
56104 + if (mm->pax_flags & MF_PAX_MPROTECT)
56105 + vm_flags &= ~VM_MAYEXEC;
56106 +#endif
56107 +
56108 + }
56109 +#endif
56110 +
56111 /*
56112 * Adjust stack execute permissions; explicitly enable for
56113 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
56114 @@ -710,13 +772,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
56115 goto out_unlock;
56116 BUG_ON(prev != vma);
56117
56118 - /* Move stack pages down in memory. */
56119 - if (stack_shift) {
56120 - ret = shift_arg_pages(vma, stack_shift);
56121 - if (ret)
56122 - goto out_unlock;
56123 - }
56124 -
56125 /* mprotect_fixup is overkill to remove the temporary stack flags */
56126 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
56127
56128 @@ -740,6 +795,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
56129 #endif
56130 current->mm->start_stack = bprm->p;
56131 ret = expand_stack(vma, stack_base);
56132 +
56133 +#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
56134 + if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
56135 + unsigned long size;
56136 + vm_flags_t vm_flags;
56137 +
56138 + size = STACK_TOP - vma->vm_end;
56139 + vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
56140 +
56141 + ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
56142 +
56143 +#ifdef CONFIG_X86
56144 + if (!ret) {
56145 + size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
56146 + ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
56147 + }
56148 +#endif
56149 +
56150 + }
56151 +#endif
56152 +
56153 if (ret)
56154 ret = -EFAULT;
56155
56156 @@ -776,6 +852,8 @@ struct file *open_exec(const char *name)
56157
56158 fsnotify_open(file);
56159
56160 + trace_open_exec(name);
56161 +
56162 err = deny_write_access(file);
56163 if (err)
56164 goto exit;
56165 @@ -799,7 +877,7 @@ int kernel_read(struct file *file, loff_t offset,
56166 old_fs = get_fs();
56167 set_fs(get_ds());
56168 /* The cast to a user pointer is valid due to the set_fs() */
56169 - result = vfs_read(file, (void __user *)addr, count, &pos);
56170 + result = vfs_read(file, (void __force_user *)addr, count, &pos);
56171 set_fs(old_fs);
56172 return result;
56173 }
56174 @@ -1255,7 +1333,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
56175 }
56176 rcu_read_unlock();
56177
56178 - if (p->fs->users > n_fs) {
56179 + if (atomic_read(&p->fs->users) > n_fs) {
56180 bprm->unsafe |= LSM_UNSAFE_SHARE;
56181 } else {
56182 res = -EAGAIN;
56183 @@ -1451,6 +1529,31 @@ static int exec_binprm(struct linux_binprm *bprm)
56184 return ret;
56185 }
56186
56187 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56188 +static DEFINE_PER_CPU(u64, exec_counter);
56189 +static int __init init_exec_counters(void)
56190 +{
56191 + unsigned int cpu;
56192 +
56193 + for_each_possible_cpu(cpu) {
56194 + per_cpu(exec_counter, cpu) = (u64)cpu;
56195 + }
56196 +
56197 + return 0;
56198 +}
56199 +early_initcall(init_exec_counters);
56200 +static inline void increment_exec_counter(void)
56201 +{
56202 + BUILD_BUG_ON(NR_CPUS > (1 << 16));
56203 + current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
56204 +}
56205 +#else
56206 +static inline void increment_exec_counter(void) {}
56207 +#endif
56208 +
56209 +extern void gr_handle_exec_args(struct linux_binprm *bprm,
56210 + struct user_arg_ptr argv);
56211 +
56212 /*
56213 * sys_execve() executes a new program.
56214 */
56215 @@ -1458,12 +1561,19 @@ static int do_execve_common(const char *filename,
56216 struct user_arg_ptr argv,
56217 struct user_arg_ptr envp)
56218 {
56219 +#ifdef CONFIG_GRKERNSEC
56220 + struct file *old_exec_file;
56221 + struct acl_subject_label *old_acl;
56222 + struct rlimit old_rlim[RLIM_NLIMITS];
56223 +#endif
56224 struct linux_binprm *bprm;
56225 struct file *file;
56226 struct files_struct *displaced;
56227 bool clear_in_exec;
56228 int retval;
56229
56230 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current_user()->processes), 1);
56231 +
56232 /*
56233 * We move the actual failure in case of RLIMIT_NPROC excess from
56234 * set*uid() to execve() because too many poorly written programs
56235 @@ -1504,12 +1614,22 @@ static int do_execve_common(const char *filename,
56236 if (IS_ERR(file))
56237 goto out_unmark;
56238
56239 + if (gr_ptrace_readexec(file, bprm->unsafe)) {
56240 + retval = -EPERM;
56241 + goto out_file;
56242 + }
56243 +
56244 sched_exec();
56245
56246 bprm->file = file;
56247 bprm->filename = filename;
56248 bprm->interp = filename;
56249
56250 + if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
56251 + retval = -EACCES;
56252 + goto out_file;
56253 + }
56254 +
56255 retval = bprm_mm_init(bprm);
56256 if (retval)
56257 goto out_file;
56258 @@ -1526,24 +1646,70 @@ static int do_execve_common(const char *filename,
56259 if (retval < 0)
56260 goto out;
56261
56262 +#ifdef CONFIG_GRKERNSEC
56263 + old_acl = current->acl;
56264 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
56265 + old_exec_file = current->exec_file;
56266 + get_file(file);
56267 + current->exec_file = file;
56268 +#endif
56269 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56270 + /* limit suid stack to 8MB
56271 + * we saved the old limits above and will restore them if this exec fails
56272 + */
56273 + if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
56274 + (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
56275 + current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
56276 +#endif
56277 +
56278 + if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
56279 + retval = -EPERM;
56280 + goto out_fail;
56281 + }
56282 +
56283 + if (!gr_tpe_allow(file)) {
56284 + retval = -EACCES;
56285 + goto out_fail;
56286 + }
56287 +
56288 + if (gr_check_crash_exec(file)) {
56289 + retval = -EACCES;
56290 + goto out_fail;
56291 + }
56292 +
56293 + retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
56294 + bprm->unsafe);
56295 + if (retval < 0)
56296 + goto out_fail;
56297 +
56298 retval = copy_strings_kernel(1, &bprm->filename, bprm);
56299 if (retval < 0)
56300 - goto out;
56301 + goto out_fail;
56302
56303 bprm->exec = bprm->p;
56304 retval = copy_strings(bprm->envc, envp, bprm);
56305 if (retval < 0)
56306 - goto out;
56307 + goto out_fail;
56308
56309 retval = copy_strings(bprm->argc, argv, bprm);
56310 if (retval < 0)
56311 - goto out;
56312 + goto out_fail;
56313 +
56314 + gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
56315 +
56316 + gr_handle_exec_args(bprm, argv);
56317
56318 retval = exec_binprm(bprm);
56319 if (retval < 0)
56320 - goto out;
56321 + goto out_fail;
56322 +#ifdef CONFIG_GRKERNSEC
56323 + if (old_exec_file)
56324 + fput(old_exec_file);
56325 +#endif
56326
56327 /* execve succeeded */
56328 +
56329 + increment_exec_counter();
56330 current->fs->in_exec = 0;
56331 current->in_execve = 0;
56332 acct_update_integrals(current);
56333 @@ -1552,6 +1718,14 @@ static int do_execve_common(const char *filename,
56334 put_files_struct(displaced);
56335 return retval;
56336
56337 +out_fail:
56338 +#ifdef CONFIG_GRKERNSEC
56339 + current->acl = old_acl;
56340 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
56341 + fput(current->exec_file);
56342 + current->exec_file = old_exec_file;
56343 +#endif
56344 +
56345 out:
56346 if (bprm->mm) {
56347 acct_arg_size(bprm, 0);
56348 @@ -1706,3 +1880,295 @@ asmlinkage long compat_sys_execve(const char __user * filename,
56349 return error;
56350 }
56351 #endif
56352 +
56353 +int pax_check_flags(unsigned long *flags)
56354 +{
56355 + int retval = 0;
56356 +
56357 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
56358 + if (*flags & MF_PAX_SEGMEXEC)
56359 + {
56360 + *flags &= ~MF_PAX_SEGMEXEC;
56361 + retval = -EINVAL;
56362 + }
56363 +#endif
56364 +
56365 + if ((*flags & MF_PAX_PAGEEXEC)
56366 +
56367 +#ifdef CONFIG_PAX_PAGEEXEC
56368 + && (*flags & MF_PAX_SEGMEXEC)
56369 +#endif
56370 +
56371 + )
56372 + {
56373 + *flags &= ~MF_PAX_PAGEEXEC;
56374 + retval = -EINVAL;
56375 + }
56376 +
56377 + if ((*flags & MF_PAX_MPROTECT)
56378 +
56379 +#ifdef CONFIG_PAX_MPROTECT
56380 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
56381 +#endif
56382 +
56383 + )
56384 + {
56385 + *flags &= ~MF_PAX_MPROTECT;
56386 + retval = -EINVAL;
56387 + }
56388 +
56389 + if ((*flags & MF_PAX_EMUTRAMP)
56390 +
56391 +#ifdef CONFIG_PAX_EMUTRAMP
56392 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
56393 +#endif
56394 +
56395 + )
56396 + {
56397 + *flags &= ~MF_PAX_EMUTRAMP;
56398 + retval = -EINVAL;
56399 + }
56400 +
56401 + return retval;
56402 +}
56403 +
56404 +EXPORT_SYMBOL(pax_check_flags);
56405 +
56406 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
56407 +char *pax_get_path(const struct path *path, char *buf, int buflen)
56408 +{
56409 + char *pathname = d_path(path, buf, buflen);
56410 +
56411 + if (IS_ERR(pathname))
56412 + goto toolong;
56413 +
56414 + pathname = mangle_path(buf, pathname, "\t\n\\");
56415 + if (!pathname)
56416 + goto toolong;
56417 +
56418 + *pathname = 0;
56419 + return buf;
56420 +
56421 +toolong:
56422 + return "<path too long>";
56423 +}
56424 +EXPORT_SYMBOL(pax_get_path);
56425 +
56426 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
56427 +{
56428 + struct task_struct *tsk = current;
56429 + struct mm_struct *mm = current->mm;
56430 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
56431 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
56432 + char *path_exec = NULL;
56433 + char *path_fault = NULL;
56434 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
56435 + siginfo_t info = { };
56436 +
56437 + if (buffer_exec && buffer_fault) {
56438 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
56439 +
56440 + down_read(&mm->mmap_sem);
56441 + vma = mm->mmap;
56442 + while (vma && (!vma_exec || !vma_fault)) {
56443 + if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
56444 + vma_exec = vma;
56445 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
56446 + vma_fault = vma;
56447 + vma = vma->vm_next;
56448 + }
56449 + if (vma_exec)
56450 + path_exec = pax_get_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
56451 + if (vma_fault) {
56452 + start = vma_fault->vm_start;
56453 + end = vma_fault->vm_end;
56454 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
56455 + if (vma_fault->vm_file)
56456 + path_fault = pax_get_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
56457 + else if ((unsigned long)pc >= mm->start_brk && (unsigned long)pc < mm->brk)
56458 + path_fault = "<heap>";
56459 + else if (vma_fault->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
56460 + path_fault = "<stack>";
56461 + else
56462 + path_fault = "<anonymous mapping>";
56463 + }
56464 + up_read(&mm->mmap_sem);
56465 + }
56466 + if (tsk->signal->curr_ip)
56467 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
56468 + else
56469 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
56470 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
56471 + from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
56472 + free_page((unsigned long)buffer_exec);
56473 + free_page((unsigned long)buffer_fault);
56474 + pax_report_insns(regs, pc, sp);
56475 + info.si_signo = SIGKILL;
56476 + info.si_errno = 0;
56477 + info.si_code = SI_KERNEL;
56478 + info.si_pid = 0;
56479 + info.si_uid = 0;
56480 + do_coredump(&info);
56481 +}
56482 +#endif
56483 +
56484 +#ifdef CONFIG_PAX_REFCOUNT
56485 +void pax_report_refcount_overflow(struct pt_regs *regs)
56486 +{
56487 + if (current->signal->curr_ip)
56488 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
56489 + &current->signal->curr_ip, current->comm, task_pid_nr(current),
56490 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
56491 + else
56492 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
56493 + from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
56494 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
56495 + preempt_disable();
56496 + show_regs(regs);
56497 + preempt_enable();
56498 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
56499 +}
56500 +#endif
56501 +
56502 +#ifdef CONFIG_PAX_USERCOPY
56503 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
56504 +static noinline int check_stack_object(const void *obj, unsigned long len)
56505 +{
56506 + const void * const stack = task_stack_page(current);
56507 + const void * const stackend = stack + THREAD_SIZE;
56508 +
56509 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
56510 + const void *frame = NULL;
56511 + const void *oldframe;
56512 +#endif
56513 +
56514 + if (obj + len < obj)
56515 + return -1;
56516 +
56517 + if (obj + len <= stack || stackend <= obj)
56518 + return 0;
56519 +
56520 + if (obj < stack || stackend < obj + len)
56521 + return -1;
56522 +
56523 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
56524 + oldframe = __builtin_frame_address(1);
56525 + if (oldframe)
56526 + frame = __builtin_frame_address(2);
56527 + /*
56528 + low ----------------------------------------------> high
56529 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
56530 + ^----------------^
56531 + allow copies only within here
56532 + */
56533 + while (stack <= frame && frame < stackend) {
56534 + /* if obj + len extends past the last frame, this
56535 + check won't pass and the next frame will be 0,
56536 + causing us to bail out and correctly report
56537 + the copy as invalid
56538 + */
56539 + if (obj + len <= frame)
56540 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
56541 + oldframe = frame;
56542 + frame = *(const void * const *)frame;
56543 + }
56544 + return -1;
56545 +#else
56546 + return 1;
56547 +#endif
56548 +}
56549 +
56550 +static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
56551 +{
56552 + if (current->signal->curr_ip)
56553 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
56554 + &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
56555 + else
56556 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
56557 + to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
56558 + dump_stack();
56559 + gr_handle_kernel_exploit();
56560 + do_group_exit(SIGKILL);
56561 +}
56562 +#endif
56563 +
56564 +#ifdef CONFIG_PAX_USERCOPY
56565 +static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
56566 +{
56567 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
56568 + unsigned long textlow = ktla_ktva((unsigned long)_stext);
56569 +#ifdef CONFIG_MODULES
56570 + unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
56571 +#else
56572 + unsigned long texthigh = ktla_ktva((unsigned long)_etext);
56573 +#endif
56574 +
56575 +#else
56576 + unsigned long textlow = (unsigned long)_stext;
56577 + unsigned long texthigh = (unsigned long)_etext;
56578 +
56579 +#ifdef CONFIG_X86_64
56580 + /* check against linear mapping as well */
56581 + if (high > (unsigned long)__va(__pa(textlow)) &&
56582 + low <= (unsigned long)__va(__pa(texthigh)))
56583 + return true;
56584 +#endif
56585 +
56586 +#endif
56587 +
56588 + if (high <= textlow || low > texthigh)
56589 + return false;
56590 + else
56591 + return true;
56592 +}
56593 +#endif
56594 +
56595 +void __check_object_size(const void *ptr, unsigned long n, bool to_user)
56596 +{
56597 +
56598 +#ifdef CONFIG_PAX_USERCOPY
56599 + const char *type;
56600 +
56601 + if (!n)
56602 + return;
56603 +
56604 + type = check_heap_object(ptr, n);
56605 + if (!type) {
56606 + int ret = check_stack_object(ptr, n);
56607 + if (ret == 1 || ret == 2)
56608 + return;
56609 + if (ret == 0) {
56610 + if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
56611 + type = "<kernel text>";
56612 + else
56613 + return;
56614 + } else
56615 + type = "<process stack>";
56616 + }
56617 +
56618 + pax_report_usercopy(ptr, n, to_user, type);
56619 +#endif
56620 +
56621 +}
56622 +EXPORT_SYMBOL(__check_object_size);
56623 +
56624 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
56625 +void pax_track_stack(void)
56626 +{
56627 + unsigned long sp = (unsigned long)&sp;
56628 + if (sp < current_thread_info()->lowest_stack &&
56629 + sp > (unsigned long)task_stack_page(current))
56630 + current_thread_info()->lowest_stack = sp;
56631 +}
56632 +EXPORT_SYMBOL(pax_track_stack);
56633 +#endif
56634 +
56635 +#ifdef CONFIG_PAX_SIZE_OVERFLOW
56636 +void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
56637 +{
56638 + printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
56639 + dump_stack();
56640 + do_group_exit(SIGKILL);
56641 +}
56642 +EXPORT_SYMBOL(report_size_overflow);
56643 +#endif
56644 diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
56645 index 9f9992b..8b59411 100644
56646 --- a/fs/ext2/balloc.c
56647 +++ b/fs/ext2/balloc.c
56648 @@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
56649
56650 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
56651 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
56652 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
56653 + if (free_blocks < root_blocks + 1 &&
56654 !uid_eq(sbi->s_resuid, current_fsuid()) &&
56655 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
56656 - !in_group_p (sbi->s_resgid))) {
56657 + !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
56658 return 0;
56659 }
56660 return 1;
56661 diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
56662 index 2d7557d..14e38f94 100644
56663 --- a/fs/ext2/xattr.c
56664 +++ b/fs/ext2/xattr.c
56665 @@ -247,7 +247,7 @@ ext2_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
56666 struct buffer_head *bh = NULL;
56667 struct ext2_xattr_entry *entry;
56668 char *end;
56669 - size_t rest = buffer_size;
56670 + size_t rest = buffer_size, total_size = 0;
56671 int error;
56672
56673 ea_idebug(inode, "buffer=%p, buffer_size=%ld",
56674 @@ -305,9 +305,10 @@ bad_block: ext2_error(inode->i_sb, "ext2_xattr_list",
56675 buffer += size;
56676 }
56677 rest -= size;
56678 + total_size += size;
56679 }
56680 }
56681 - error = buffer_size - rest; /* total size */
56682 + error = total_size;
56683
56684 cleanup:
56685 brelse(bh);
56686 diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
56687 index 22548f5..41521d8 100644
56688 --- a/fs/ext3/balloc.c
56689 +++ b/fs/ext3/balloc.c
56690 @@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
56691
56692 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
56693 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
56694 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
56695 + if (free_blocks < root_blocks + 1 &&
56696 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
56697 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
56698 - !in_group_p (sbi->s_resgid))) {
56699 + !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
56700 return 0;
56701 }
56702 return 1;
56703 diff --git a/fs/ext3/xattr.c b/fs/ext3/xattr.c
56704 index b1fc963..881228c 100644
56705 --- a/fs/ext3/xattr.c
56706 +++ b/fs/ext3/xattr.c
56707 @@ -330,7 +330,7 @@ static int
56708 ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
56709 char *buffer, size_t buffer_size)
56710 {
56711 - size_t rest = buffer_size;
56712 + size_t rest = buffer_size, total_size = 0;
56713
56714 for (; !IS_LAST_ENTRY(entry); entry = EXT3_XATTR_NEXT(entry)) {
56715 const struct xattr_handler *handler =
56716 @@ -347,9 +347,10 @@ ext3_xattr_list_entries(struct dentry *dentry, struct ext3_xattr_entry *entry,
56717 buffer += size;
56718 }
56719 rest -= size;
56720 + total_size += size;
56721 }
56722 }
56723 - return buffer_size - rest;
56724 + return total_size;
56725 }
56726
56727 static int
56728 diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
56729 index dc5d572..4c21f8e 100644
56730 --- a/fs/ext4/balloc.c
56731 +++ b/fs/ext4/balloc.c
56732 @@ -534,8 +534,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
56733 /* Hm, nope. Are (enough) root reserved clusters available? */
56734 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
56735 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
56736 - capable(CAP_SYS_RESOURCE) ||
56737 - (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
56738 + (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
56739 + capable_nolog(CAP_SYS_RESOURCE)) {
56740
56741 if (free_clusters >= (nclusters + dirty_clusters +
56742 resv_clusters))
56743 diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
56744 index af815ea..99294a6 100644
56745 --- a/fs/ext4/ext4.h
56746 +++ b/fs/ext4/ext4.h
56747 @@ -1256,19 +1256,19 @@ struct ext4_sb_info {
56748 unsigned long s_mb_last_start;
56749
56750 /* stats for buddy allocator */
56751 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
56752 - atomic_t s_bal_success; /* we found long enough chunks */
56753 - atomic_t s_bal_allocated; /* in blocks */
56754 - atomic_t s_bal_ex_scanned; /* total extents scanned */
56755 - atomic_t s_bal_goals; /* goal hits */
56756 - atomic_t s_bal_breaks; /* too long searches */
56757 - atomic_t s_bal_2orders; /* 2^order hits */
56758 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
56759 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
56760 + atomic_unchecked_t s_bal_allocated; /* in blocks */
56761 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
56762 + atomic_unchecked_t s_bal_goals; /* goal hits */
56763 + atomic_unchecked_t s_bal_breaks; /* too long searches */
56764 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
56765 spinlock_t s_bal_lock;
56766 unsigned long s_mb_buddies_generated;
56767 unsigned long long s_mb_generation_time;
56768 - atomic_t s_mb_lost_chunks;
56769 - atomic_t s_mb_preallocated;
56770 - atomic_t s_mb_discarded;
56771 + atomic_unchecked_t s_mb_lost_chunks;
56772 + atomic_unchecked_t s_mb_preallocated;
56773 + atomic_unchecked_t s_mb_discarded;
56774 atomic_t s_lock_busy;
56775
56776 /* locality groups */
56777 diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
56778 index a41e3ba..e574a00 100644
56779 --- a/fs/ext4/mballoc.c
56780 +++ b/fs/ext4/mballoc.c
56781 @@ -1880,7 +1880,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
56782 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
56783
56784 if (EXT4_SB(sb)->s_mb_stats)
56785 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
56786 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
56787
56788 break;
56789 }
56790 @@ -2189,7 +2189,7 @@ repeat:
56791 ac->ac_status = AC_STATUS_CONTINUE;
56792 ac->ac_flags |= EXT4_MB_HINT_FIRST;
56793 cr = 3;
56794 - atomic_inc(&sbi->s_mb_lost_chunks);
56795 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
56796 goto repeat;
56797 }
56798 }
56799 @@ -2697,25 +2697,25 @@ int ext4_mb_release(struct super_block *sb)
56800 if (sbi->s_mb_stats) {
56801 ext4_msg(sb, KERN_INFO,
56802 "mballoc: %u blocks %u reqs (%u success)",
56803 - atomic_read(&sbi->s_bal_allocated),
56804 - atomic_read(&sbi->s_bal_reqs),
56805 - atomic_read(&sbi->s_bal_success));
56806 + atomic_read_unchecked(&sbi->s_bal_allocated),
56807 + atomic_read_unchecked(&sbi->s_bal_reqs),
56808 + atomic_read_unchecked(&sbi->s_bal_success));
56809 ext4_msg(sb, KERN_INFO,
56810 "mballoc: %u extents scanned, %u goal hits, "
56811 "%u 2^N hits, %u breaks, %u lost",
56812 - atomic_read(&sbi->s_bal_ex_scanned),
56813 - atomic_read(&sbi->s_bal_goals),
56814 - atomic_read(&sbi->s_bal_2orders),
56815 - atomic_read(&sbi->s_bal_breaks),
56816 - atomic_read(&sbi->s_mb_lost_chunks));
56817 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
56818 + atomic_read_unchecked(&sbi->s_bal_goals),
56819 + atomic_read_unchecked(&sbi->s_bal_2orders),
56820 + atomic_read_unchecked(&sbi->s_bal_breaks),
56821 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
56822 ext4_msg(sb, KERN_INFO,
56823 "mballoc: %lu generated and it took %Lu",
56824 sbi->s_mb_buddies_generated,
56825 sbi->s_mb_generation_time);
56826 ext4_msg(sb, KERN_INFO,
56827 "mballoc: %u preallocated, %u discarded",
56828 - atomic_read(&sbi->s_mb_preallocated),
56829 - atomic_read(&sbi->s_mb_discarded));
56830 + atomic_read_unchecked(&sbi->s_mb_preallocated),
56831 + atomic_read_unchecked(&sbi->s_mb_discarded));
56832 }
56833
56834 free_percpu(sbi->s_locality_groups);
56835 @@ -3169,16 +3169,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
56836 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
56837
56838 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
56839 - atomic_inc(&sbi->s_bal_reqs);
56840 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
56841 + atomic_inc_unchecked(&sbi->s_bal_reqs);
56842 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
56843 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
56844 - atomic_inc(&sbi->s_bal_success);
56845 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
56846 + atomic_inc_unchecked(&sbi->s_bal_success);
56847 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
56848 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
56849 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
56850 - atomic_inc(&sbi->s_bal_goals);
56851 + atomic_inc_unchecked(&sbi->s_bal_goals);
56852 if (ac->ac_found > sbi->s_mb_max_to_scan)
56853 - atomic_inc(&sbi->s_bal_breaks);
56854 + atomic_inc_unchecked(&sbi->s_bal_breaks);
56855 }
56856
56857 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
56858 @@ -3578,7 +3578,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
56859 trace_ext4_mb_new_inode_pa(ac, pa);
56860
56861 ext4_mb_use_inode_pa(ac, pa);
56862 - atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
56863 + atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
56864
56865 ei = EXT4_I(ac->ac_inode);
56866 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
56867 @@ -3638,7 +3638,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
56868 trace_ext4_mb_new_group_pa(ac, pa);
56869
56870 ext4_mb_use_group_pa(ac, pa);
56871 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
56872 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
56873
56874 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
56875 lg = ac->ac_lg;
56876 @@ -3727,7 +3727,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
56877 * from the bitmap and continue.
56878 */
56879 }
56880 - atomic_add(free, &sbi->s_mb_discarded);
56881 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
56882
56883 return err;
56884 }
56885 @@ -3745,7 +3745,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
56886 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
56887 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
56888 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
56889 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
56890 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
56891 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
56892
56893 return 0;
56894 diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
56895 index 214461e..3614c89 100644
56896 --- a/fs/ext4/mmp.c
56897 +++ b/fs/ext4/mmp.c
56898 @@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
56899 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
56900 const char *function, unsigned int line, const char *msg)
56901 {
56902 - __ext4_warning(sb, function, line, msg);
56903 + __ext4_warning(sb, function, line, "%s", msg);
56904 __ext4_warning(sb, function, line,
56905 "MMP failure info: last update time: %llu, last update "
56906 "node: %s, last update device: %s\n",
56907 diff --git a/fs/ext4/super.c b/fs/ext4/super.c
56908 index 2c2e6cb..7c3ee62 100644
56909 --- a/fs/ext4/super.c
56910 +++ b/fs/ext4/super.c
56911 @@ -1251,7 +1251,7 @@ static ext4_fsblk_t get_sb_block(void **data)
56912 }
56913
56914 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
56915 -static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
56916 +static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
56917 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
56918
56919 #ifdef CONFIG_QUOTA
56920 @@ -2431,7 +2431,7 @@ struct ext4_attr {
56921 int offset;
56922 int deprecated_val;
56923 } u;
56924 -};
56925 +} __do_const;
56926
56927 static int parse_strtoull(const char *buf,
56928 unsigned long long max, unsigned long long *value)
56929 diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
56930 index 1423c48..9c0c6dc 100644
56931 --- a/fs/ext4/xattr.c
56932 +++ b/fs/ext4/xattr.c
56933 @@ -381,7 +381,7 @@ static int
56934 ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
56935 char *buffer, size_t buffer_size)
56936 {
56937 - size_t rest = buffer_size;
56938 + size_t rest = buffer_size, total_size = 0;
56939
56940 for (; !IS_LAST_ENTRY(entry); entry = EXT4_XATTR_NEXT(entry)) {
56941 const struct xattr_handler *handler =
56942 @@ -398,9 +398,10 @@ ext4_xattr_list_entries(struct dentry *dentry, struct ext4_xattr_entry *entry,
56943 buffer += size;
56944 }
56945 rest -= size;
56946 + total_size += size;
56947 }
56948 }
56949 - return buffer_size - rest;
56950 + return total_size;
56951 }
56952
56953 static int
56954 diff --git a/fs/fcntl.c b/fs/fcntl.c
56955 index 65343c3..9969dcf 100644
56956 --- a/fs/fcntl.c
56957 +++ b/fs/fcntl.c
56958 @@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
56959 if (err)
56960 return err;
56961
56962 + if (gr_handle_chroot_fowner(pid, type))
56963 + return -ENOENT;
56964 + if (gr_check_protected_task_fowner(pid, type))
56965 + return -EACCES;
56966 +
56967 f_modown(filp, pid, type, force);
56968 return 0;
56969 }
56970 diff --git a/fs/fhandle.c b/fs/fhandle.c
56971 index 999ff5c..41f4109 100644
56972 --- a/fs/fhandle.c
56973 +++ b/fs/fhandle.c
56974 @@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
56975 } else
56976 retval = 0;
56977 /* copy the mount id */
56978 - if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
56979 - sizeof(*mnt_id)) ||
56980 + if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
56981 copy_to_user(ufh, handle,
56982 sizeof(struct file_handle) + handle_bytes))
56983 retval = -EFAULT;
56984 diff --git a/fs/file.c b/fs/file.c
56985 index 4a78f98..f9a6d25 100644
56986 --- a/fs/file.c
56987 +++ b/fs/file.c
56988 @@ -16,6 +16,7 @@
56989 #include <linux/slab.h>
56990 #include <linux/vmalloc.h>
56991 #include <linux/file.h>
56992 +#include <linux/security.h>
56993 #include <linux/fdtable.h>
56994 #include <linux/bitops.h>
56995 #include <linux/interrupt.h>
56996 @@ -141,7 +142,7 @@ out:
56997 * Return <0 error code on error; 1 on successful completion.
56998 * The files->file_lock should be held on entry, and will be held on exit.
56999 */
57000 -static int expand_fdtable(struct files_struct *files, int nr)
57001 +static int expand_fdtable(struct files_struct *files, unsigned int nr)
57002 __releases(files->file_lock)
57003 __acquires(files->file_lock)
57004 {
57005 @@ -186,7 +187,7 @@ static int expand_fdtable(struct files_struct *files, int nr)
57006 * expanded and execution may have blocked.
57007 * The files->file_lock should be held on entry, and will be held on exit.
57008 */
57009 -static int expand_files(struct files_struct *files, int nr)
57010 +static int expand_files(struct files_struct *files, unsigned int nr)
57011 {
57012 struct fdtable *fdt;
57013
57014 @@ -828,6 +829,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
57015 if (!file)
57016 return __close_fd(files, fd);
57017
57018 + gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
57019 if (fd >= rlimit(RLIMIT_NOFILE))
57020 return -EBADF;
57021
57022 @@ -854,6 +856,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
57023 if (unlikely(oldfd == newfd))
57024 return -EINVAL;
57025
57026 + gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
57027 if (newfd >= rlimit(RLIMIT_NOFILE))
57028 return -EBADF;
57029
57030 @@ -909,6 +912,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
57031 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
57032 {
57033 int err;
57034 + gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
57035 if (from >= rlimit(RLIMIT_NOFILE))
57036 return -EINVAL;
57037 err = alloc_fd(from, flags);
57038 diff --git a/fs/filesystems.c b/fs/filesystems.c
57039 index 92567d9..fcd8cbf 100644
57040 --- a/fs/filesystems.c
57041 +++ b/fs/filesystems.c
57042 @@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name)
57043 int len = dot ? dot - name : strlen(name);
57044
57045 fs = __get_fs_type(name, len);
57046 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57047 + if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
57048 +#else
57049 if (!fs && (request_module("fs-%.*s", len, name) == 0))
57050 +#endif
57051 fs = __get_fs_type(name, len);
57052
57053 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
57054 diff --git a/fs/fs_struct.c b/fs/fs_struct.c
57055 index d8ac61d..79a36f0 100644
57056 --- a/fs/fs_struct.c
57057 +++ b/fs/fs_struct.c
57058 @@ -4,6 +4,7 @@
57059 #include <linux/path.h>
57060 #include <linux/slab.h>
57061 #include <linux/fs_struct.h>
57062 +#include <linux/grsecurity.h>
57063 #include "internal.h"
57064
57065 /*
57066 @@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
57067 write_seqcount_begin(&fs->seq);
57068 old_root = fs->root;
57069 fs->root = *path;
57070 + gr_set_chroot_entries(current, path);
57071 write_seqcount_end(&fs->seq);
57072 spin_unlock(&fs->lock);
57073 if (old_root.dentry)
57074 @@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
57075 int hits = 0;
57076 spin_lock(&fs->lock);
57077 write_seqcount_begin(&fs->seq);
57078 + /* this root replacement is only done by pivot_root,
57079 + leave grsec's chroot tagging alone for this task
57080 + so that a pivoted root isn't treated as a chroot
57081 + */
57082 hits += replace_path(&fs->root, old_root, new_root);
57083 hits += replace_path(&fs->pwd, old_root, new_root);
57084 write_seqcount_end(&fs->seq);
57085 @@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
57086 task_lock(tsk);
57087 spin_lock(&fs->lock);
57088 tsk->fs = NULL;
57089 - kill = !--fs->users;
57090 + gr_clear_chroot_entries(tsk);
57091 + kill = !atomic_dec_return(&fs->users);
57092 spin_unlock(&fs->lock);
57093 task_unlock(tsk);
57094 if (kill)
57095 @@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
57096 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
57097 /* We don't need to lock fs - think why ;-) */
57098 if (fs) {
57099 - fs->users = 1;
57100 + atomic_set(&fs->users, 1);
57101 fs->in_exec = 0;
57102 spin_lock_init(&fs->lock);
57103 seqcount_init(&fs->seq);
57104 @@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
57105 spin_lock(&old->lock);
57106 fs->root = old->root;
57107 path_get(&fs->root);
57108 + /* instead of calling gr_set_chroot_entries here,
57109 + we call it from every caller of this function
57110 + */
57111 fs->pwd = old->pwd;
57112 path_get(&fs->pwd);
57113 spin_unlock(&old->lock);
57114 @@ -139,8 +149,9 @@ int unshare_fs_struct(void)
57115
57116 task_lock(current);
57117 spin_lock(&fs->lock);
57118 - kill = !--fs->users;
57119 + kill = !atomic_dec_return(&fs->users);
57120 current->fs = new_fs;
57121 + gr_set_chroot_entries(current, &new_fs->root);
57122 spin_unlock(&fs->lock);
57123 task_unlock(current);
57124
57125 @@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
57126
57127 int current_umask(void)
57128 {
57129 - return current->fs->umask;
57130 + return current->fs->umask | gr_acl_umask();
57131 }
57132 EXPORT_SYMBOL(current_umask);
57133
57134 /* to be mentioned only in INIT_TASK */
57135 struct fs_struct init_fs = {
57136 - .users = 1,
57137 + .users = ATOMIC_INIT(1),
57138 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
57139 .seq = SEQCNT_ZERO,
57140 .umask = 0022,
57141 diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
57142 index b2a86e3..37f425a 100644
57143 --- a/fs/fscache/cookie.c
57144 +++ b/fs/fscache/cookie.c
57145 @@ -19,7 +19,7 @@
57146
57147 struct kmem_cache *fscache_cookie_jar;
57148
57149 -static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
57150 +static atomic_unchecked_t fscache_object_debug_id = ATOMIC_INIT(0);
57151
57152 static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
57153 static int fscache_alloc_object(struct fscache_cache *cache,
57154 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
57155 parent ? (char *) parent->def->name : "<no-parent>",
57156 def->name, netfs_data);
57157
57158 - fscache_stat(&fscache_n_acquires);
57159 + fscache_stat_unchecked(&fscache_n_acquires);
57160
57161 /* if there's no parent cookie, then we don't create one here either */
57162 if (!parent) {
57163 - fscache_stat(&fscache_n_acquires_null);
57164 + fscache_stat_unchecked(&fscache_n_acquires_null);
57165 _leave(" [no parent]");
57166 return NULL;
57167 }
57168 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
57169 /* allocate and initialise a cookie */
57170 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
57171 if (!cookie) {
57172 - fscache_stat(&fscache_n_acquires_oom);
57173 + fscache_stat_unchecked(&fscache_n_acquires_oom);
57174 _leave(" [ENOMEM]");
57175 return NULL;
57176 }
57177 @@ -114,13 +114,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
57178
57179 switch (cookie->def->type) {
57180 case FSCACHE_COOKIE_TYPE_INDEX:
57181 - fscache_stat(&fscache_n_cookie_index);
57182 + fscache_stat_unchecked(&fscache_n_cookie_index);
57183 break;
57184 case FSCACHE_COOKIE_TYPE_DATAFILE:
57185 - fscache_stat(&fscache_n_cookie_data);
57186 + fscache_stat_unchecked(&fscache_n_cookie_data);
57187 break;
57188 default:
57189 - fscache_stat(&fscache_n_cookie_special);
57190 + fscache_stat_unchecked(&fscache_n_cookie_special);
57191 break;
57192 }
57193
57194 @@ -131,13 +131,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
57195 if (fscache_acquire_non_index_cookie(cookie) < 0) {
57196 atomic_dec(&parent->n_children);
57197 __fscache_cookie_put(cookie);
57198 - fscache_stat(&fscache_n_acquires_nobufs);
57199 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
57200 _leave(" = NULL");
57201 return NULL;
57202 }
57203 }
57204
57205 - fscache_stat(&fscache_n_acquires_ok);
57206 + fscache_stat_unchecked(&fscache_n_acquires_ok);
57207 _leave(" = %p", cookie);
57208 return cookie;
57209 }
57210 @@ -173,7 +173,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
57211 cache = fscache_select_cache_for_object(cookie->parent);
57212 if (!cache) {
57213 up_read(&fscache_addremove_sem);
57214 - fscache_stat(&fscache_n_acquires_no_cache);
57215 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
57216 _leave(" = -ENOMEDIUM [no cache]");
57217 return -ENOMEDIUM;
57218 }
57219 @@ -259,14 +259,14 @@ static int fscache_alloc_object(struct fscache_cache *cache,
57220 object = cache->ops->alloc_object(cache, cookie);
57221 fscache_stat_d(&fscache_n_cop_alloc_object);
57222 if (IS_ERR(object)) {
57223 - fscache_stat(&fscache_n_object_no_alloc);
57224 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
57225 ret = PTR_ERR(object);
57226 goto error;
57227 }
57228
57229 - fscache_stat(&fscache_n_object_alloc);
57230 + fscache_stat_unchecked(&fscache_n_object_alloc);
57231
57232 - object->debug_id = atomic_inc_return(&fscache_object_debug_id);
57233 + object->debug_id = atomic_inc_return_unchecked(&fscache_object_debug_id);
57234
57235 _debug("ALLOC OBJ%x: %s {%lx}",
57236 object->debug_id, cookie->def->name, object->events);
57237 @@ -380,7 +380,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
57238
57239 _enter("{%s}", cookie->def->name);
57240
57241 - fscache_stat(&fscache_n_invalidates);
57242 + fscache_stat_unchecked(&fscache_n_invalidates);
57243
57244 /* Only permit invalidation of data files. Invalidating an index will
57245 * require the caller to release all its attachments to the tree rooted
57246 @@ -438,10 +438,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
57247 {
57248 struct fscache_object *object;
57249
57250 - fscache_stat(&fscache_n_updates);
57251 + fscache_stat_unchecked(&fscache_n_updates);
57252
57253 if (!cookie) {
57254 - fscache_stat(&fscache_n_updates_null);
57255 + fscache_stat_unchecked(&fscache_n_updates_null);
57256 _leave(" [no cookie]");
57257 return;
57258 }
57259 @@ -473,12 +473,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
57260 {
57261 struct fscache_object *object;
57262
57263 - fscache_stat(&fscache_n_relinquishes);
57264 + fscache_stat_unchecked(&fscache_n_relinquishes);
57265 if (retire)
57266 - fscache_stat(&fscache_n_relinquishes_retire);
57267 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
57268
57269 if (!cookie) {
57270 - fscache_stat(&fscache_n_relinquishes_null);
57271 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
57272 _leave(" [no cookie]");
57273 return;
57274 }
57275 @@ -598,7 +598,7 @@ int __fscache_check_consistency(struct fscache_cookie *cookie)
57276 if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
57277 goto inconsistent;
57278
57279 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
57280 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
57281
57282 atomic_inc(&cookie->n_active);
57283 if (fscache_submit_op(object, op) < 0)
57284 diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
57285 index 4226f66..0fb3f45 100644
57286 --- a/fs/fscache/internal.h
57287 +++ b/fs/fscache/internal.h
57288 @@ -133,8 +133,8 @@ extern void fscache_operation_gc(struct work_struct *);
57289 extern int fscache_wait_for_deferred_lookup(struct fscache_cookie *);
57290 extern int fscache_wait_for_operation_activation(struct fscache_object *,
57291 struct fscache_operation *,
57292 - atomic_t *,
57293 - atomic_t *,
57294 + atomic_unchecked_t *,
57295 + atomic_unchecked_t *,
57296 void (*)(struct fscache_operation *));
57297 extern void fscache_invalidate_writes(struct fscache_cookie *);
57298
57299 @@ -153,101 +153,101 @@ extern void fscache_proc_cleanup(void);
57300 * stats.c
57301 */
57302 #ifdef CONFIG_FSCACHE_STATS
57303 -extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
57304 -extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
57305 +extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
57306 +extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
57307
57308 -extern atomic_t fscache_n_op_pend;
57309 -extern atomic_t fscache_n_op_run;
57310 -extern atomic_t fscache_n_op_enqueue;
57311 -extern atomic_t fscache_n_op_deferred_release;
57312 -extern atomic_t fscache_n_op_release;
57313 -extern atomic_t fscache_n_op_gc;
57314 -extern atomic_t fscache_n_op_cancelled;
57315 -extern atomic_t fscache_n_op_rejected;
57316 +extern atomic_unchecked_t fscache_n_op_pend;
57317 +extern atomic_unchecked_t fscache_n_op_run;
57318 +extern atomic_unchecked_t fscache_n_op_enqueue;
57319 +extern atomic_unchecked_t fscache_n_op_deferred_release;
57320 +extern atomic_unchecked_t fscache_n_op_release;
57321 +extern atomic_unchecked_t fscache_n_op_gc;
57322 +extern atomic_unchecked_t fscache_n_op_cancelled;
57323 +extern atomic_unchecked_t fscache_n_op_rejected;
57324
57325 -extern atomic_t fscache_n_attr_changed;
57326 -extern atomic_t fscache_n_attr_changed_ok;
57327 -extern atomic_t fscache_n_attr_changed_nobufs;
57328 -extern atomic_t fscache_n_attr_changed_nomem;
57329 -extern atomic_t fscache_n_attr_changed_calls;
57330 +extern atomic_unchecked_t fscache_n_attr_changed;
57331 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
57332 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
57333 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
57334 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
57335
57336 -extern atomic_t fscache_n_allocs;
57337 -extern atomic_t fscache_n_allocs_ok;
57338 -extern atomic_t fscache_n_allocs_wait;
57339 -extern atomic_t fscache_n_allocs_nobufs;
57340 -extern atomic_t fscache_n_allocs_intr;
57341 -extern atomic_t fscache_n_allocs_object_dead;
57342 -extern atomic_t fscache_n_alloc_ops;
57343 -extern atomic_t fscache_n_alloc_op_waits;
57344 +extern atomic_unchecked_t fscache_n_allocs;
57345 +extern atomic_unchecked_t fscache_n_allocs_ok;
57346 +extern atomic_unchecked_t fscache_n_allocs_wait;
57347 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
57348 +extern atomic_unchecked_t fscache_n_allocs_intr;
57349 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
57350 +extern atomic_unchecked_t fscache_n_alloc_ops;
57351 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
57352
57353 -extern atomic_t fscache_n_retrievals;
57354 -extern atomic_t fscache_n_retrievals_ok;
57355 -extern atomic_t fscache_n_retrievals_wait;
57356 -extern atomic_t fscache_n_retrievals_nodata;
57357 -extern atomic_t fscache_n_retrievals_nobufs;
57358 -extern atomic_t fscache_n_retrievals_intr;
57359 -extern atomic_t fscache_n_retrievals_nomem;
57360 -extern atomic_t fscache_n_retrievals_object_dead;
57361 -extern atomic_t fscache_n_retrieval_ops;
57362 -extern atomic_t fscache_n_retrieval_op_waits;
57363 +extern atomic_unchecked_t fscache_n_retrievals;
57364 +extern atomic_unchecked_t fscache_n_retrievals_ok;
57365 +extern atomic_unchecked_t fscache_n_retrievals_wait;
57366 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
57367 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
57368 +extern atomic_unchecked_t fscache_n_retrievals_intr;
57369 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
57370 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
57371 +extern atomic_unchecked_t fscache_n_retrieval_ops;
57372 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
57373
57374 -extern atomic_t fscache_n_stores;
57375 -extern atomic_t fscache_n_stores_ok;
57376 -extern atomic_t fscache_n_stores_again;
57377 -extern atomic_t fscache_n_stores_nobufs;
57378 -extern atomic_t fscache_n_stores_oom;
57379 -extern atomic_t fscache_n_store_ops;
57380 -extern atomic_t fscache_n_store_calls;
57381 -extern atomic_t fscache_n_store_pages;
57382 -extern atomic_t fscache_n_store_radix_deletes;
57383 -extern atomic_t fscache_n_store_pages_over_limit;
57384 +extern atomic_unchecked_t fscache_n_stores;
57385 +extern atomic_unchecked_t fscache_n_stores_ok;
57386 +extern atomic_unchecked_t fscache_n_stores_again;
57387 +extern atomic_unchecked_t fscache_n_stores_nobufs;
57388 +extern atomic_unchecked_t fscache_n_stores_oom;
57389 +extern atomic_unchecked_t fscache_n_store_ops;
57390 +extern atomic_unchecked_t fscache_n_store_calls;
57391 +extern atomic_unchecked_t fscache_n_store_pages;
57392 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
57393 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
57394
57395 -extern atomic_t fscache_n_store_vmscan_not_storing;
57396 -extern atomic_t fscache_n_store_vmscan_gone;
57397 -extern atomic_t fscache_n_store_vmscan_busy;
57398 -extern atomic_t fscache_n_store_vmscan_cancelled;
57399 -extern atomic_t fscache_n_store_vmscan_wait;
57400 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
57401 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
57402 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
57403 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
57404 +extern atomic_unchecked_t fscache_n_store_vmscan_wait;
57405
57406 -extern atomic_t fscache_n_marks;
57407 -extern atomic_t fscache_n_uncaches;
57408 +extern atomic_unchecked_t fscache_n_marks;
57409 +extern atomic_unchecked_t fscache_n_uncaches;
57410
57411 -extern atomic_t fscache_n_acquires;
57412 -extern atomic_t fscache_n_acquires_null;
57413 -extern atomic_t fscache_n_acquires_no_cache;
57414 -extern atomic_t fscache_n_acquires_ok;
57415 -extern atomic_t fscache_n_acquires_nobufs;
57416 -extern atomic_t fscache_n_acquires_oom;
57417 +extern atomic_unchecked_t fscache_n_acquires;
57418 +extern atomic_unchecked_t fscache_n_acquires_null;
57419 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
57420 +extern atomic_unchecked_t fscache_n_acquires_ok;
57421 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
57422 +extern atomic_unchecked_t fscache_n_acquires_oom;
57423
57424 -extern atomic_t fscache_n_invalidates;
57425 -extern atomic_t fscache_n_invalidates_run;
57426 +extern atomic_unchecked_t fscache_n_invalidates;
57427 +extern atomic_unchecked_t fscache_n_invalidates_run;
57428
57429 -extern atomic_t fscache_n_updates;
57430 -extern atomic_t fscache_n_updates_null;
57431 -extern atomic_t fscache_n_updates_run;
57432 +extern atomic_unchecked_t fscache_n_updates;
57433 +extern atomic_unchecked_t fscache_n_updates_null;
57434 +extern atomic_unchecked_t fscache_n_updates_run;
57435
57436 -extern atomic_t fscache_n_relinquishes;
57437 -extern atomic_t fscache_n_relinquishes_null;
57438 -extern atomic_t fscache_n_relinquishes_waitcrt;
57439 -extern atomic_t fscache_n_relinquishes_retire;
57440 +extern atomic_unchecked_t fscache_n_relinquishes;
57441 +extern atomic_unchecked_t fscache_n_relinquishes_null;
57442 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
57443 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
57444
57445 -extern atomic_t fscache_n_cookie_index;
57446 -extern atomic_t fscache_n_cookie_data;
57447 -extern atomic_t fscache_n_cookie_special;
57448 +extern atomic_unchecked_t fscache_n_cookie_index;
57449 +extern atomic_unchecked_t fscache_n_cookie_data;
57450 +extern atomic_unchecked_t fscache_n_cookie_special;
57451
57452 -extern atomic_t fscache_n_object_alloc;
57453 -extern atomic_t fscache_n_object_no_alloc;
57454 -extern atomic_t fscache_n_object_lookups;
57455 -extern atomic_t fscache_n_object_lookups_negative;
57456 -extern atomic_t fscache_n_object_lookups_positive;
57457 -extern atomic_t fscache_n_object_lookups_timed_out;
57458 -extern atomic_t fscache_n_object_created;
57459 -extern atomic_t fscache_n_object_avail;
57460 -extern atomic_t fscache_n_object_dead;
57461 +extern atomic_unchecked_t fscache_n_object_alloc;
57462 +extern atomic_unchecked_t fscache_n_object_no_alloc;
57463 +extern atomic_unchecked_t fscache_n_object_lookups;
57464 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
57465 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
57466 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
57467 +extern atomic_unchecked_t fscache_n_object_created;
57468 +extern atomic_unchecked_t fscache_n_object_avail;
57469 +extern atomic_unchecked_t fscache_n_object_dead;
57470
57471 -extern atomic_t fscache_n_checkaux_none;
57472 -extern atomic_t fscache_n_checkaux_okay;
57473 -extern atomic_t fscache_n_checkaux_update;
57474 -extern atomic_t fscache_n_checkaux_obsolete;
57475 +extern atomic_unchecked_t fscache_n_checkaux_none;
57476 +extern atomic_unchecked_t fscache_n_checkaux_okay;
57477 +extern atomic_unchecked_t fscache_n_checkaux_update;
57478 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
57479
57480 extern atomic_t fscache_n_cop_alloc_object;
57481 extern atomic_t fscache_n_cop_lookup_object;
57482 @@ -272,6 +272,11 @@ static inline void fscache_stat(atomic_t *stat)
57483 atomic_inc(stat);
57484 }
57485
57486 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
57487 +{
57488 + atomic_inc_unchecked(stat);
57489 +}
57490 +
57491 static inline void fscache_stat_d(atomic_t *stat)
57492 {
57493 atomic_dec(stat);
57494 @@ -284,6 +289,7 @@ extern const struct file_operations fscache_stats_fops;
57495
57496 #define __fscache_stat(stat) (NULL)
57497 #define fscache_stat(stat) do {} while (0)
57498 +#define fscache_stat_unchecked(stat) do {} while (0)
57499 #define fscache_stat_d(stat) do {} while (0)
57500 #endif
57501
57502 diff --git a/fs/fscache/object.c b/fs/fscache/object.c
57503 index 86d75a6..5f3d7a0 100644
57504 --- a/fs/fscache/object.c
57505 +++ b/fs/fscache/object.c
57506 @@ -451,7 +451,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
57507 _debug("LOOKUP \"%s\" in \"%s\"",
57508 cookie->def->name, object->cache->tag->name);
57509
57510 - fscache_stat(&fscache_n_object_lookups);
57511 + fscache_stat_unchecked(&fscache_n_object_lookups);
57512 fscache_stat(&fscache_n_cop_lookup_object);
57513 ret = object->cache->ops->lookup_object(object);
57514 fscache_stat_d(&fscache_n_cop_lookup_object);
57515 @@ -461,7 +461,7 @@ static const struct fscache_state *fscache_look_up_object(struct fscache_object
57516 if (ret == -ETIMEDOUT) {
57517 /* probably stuck behind another object, so move this one to
57518 * the back of the queue */
57519 - fscache_stat(&fscache_n_object_lookups_timed_out);
57520 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
57521 _leave(" [timeout]");
57522 return NO_TRANSIT;
57523 }
57524 @@ -489,7 +489,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
57525 _enter("{OBJ%x,%s}", object->debug_id, object->state->name);
57526
57527 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
57528 - fscache_stat(&fscache_n_object_lookups_negative);
57529 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
57530
57531 /* Allow write requests to begin stacking up and read requests to begin
57532 * returning ENODATA.
57533 @@ -523,7 +523,7 @@ void fscache_obtained_object(struct fscache_object *object)
57534 /* if we were still looking up, then we must have a positive lookup
57535 * result, in which case there may be data available */
57536 if (!test_and_set_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags)) {
57537 - fscache_stat(&fscache_n_object_lookups_positive);
57538 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
57539
57540 /* We do (presumably) have data */
57541 clear_bit_unlock(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
57542 @@ -534,7 +534,7 @@ void fscache_obtained_object(struct fscache_object *object)
57543 clear_bit_unlock(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
57544 wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
57545 } else {
57546 - fscache_stat(&fscache_n_object_created);
57547 + fscache_stat_unchecked(&fscache_n_object_created);
57548 }
57549
57550 set_bit(FSCACHE_OBJECT_IS_AVAILABLE, &object->flags);
57551 @@ -570,7 +570,7 @@ static const struct fscache_state *fscache_object_available(struct fscache_objec
57552 fscache_stat_d(&fscache_n_cop_lookup_complete);
57553
57554 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
57555 - fscache_stat(&fscache_n_object_avail);
57556 + fscache_stat_unchecked(&fscache_n_object_avail);
57557
57558 _leave("");
57559 return transit_to(JUMPSTART_DEPS);
57560 @@ -716,7 +716,7 @@ static const struct fscache_state *fscache_drop_object(struct fscache_object *ob
57561
57562 /* this just shifts the object release to the work processor */
57563 fscache_put_object(object);
57564 - fscache_stat(&fscache_n_object_dead);
57565 + fscache_stat_unchecked(&fscache_n_object_dead);
57566
57567 _leave("");
57568 return transit_to(OBJECT_DEAD);
57569 @@ -881,7 +881,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
57570 enum fscache_checkaux result;
57571
57572 if (!object->cookie->def->check_aux) {
57573 - fscache_stat(&fscache_n_checkaux_none);
57574 + fscache_stat_unchecked(&fscache_n_checkaux_none);
57575 return FSCACHE_CHECKAUX_OKAY;
57576 }
57577
57578 @@ -890,17 +890,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
57579 switch (result) {
57580 /* entry okay as is */
57581 case FSCACHE_CHECKAUX_OKAY:
57582 - fscache_stat(&fscache_n_checkaux_okay);
57583 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
57584 break;
57585
57586 /* entry requires update */
57587 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
57588 - fscache_stat(&fscache_n_checkaux_update);
57589 + fscache_stat_unchecked(&fscache_n_checkaux_update);
57590 break;
57591
57592 /* entry requires deletion */
57593 case FSCACHE_CHECKAUX_OBSOLETE:
57594 - fscache_stat(&fscache_n_checkaux_obsolete);
57595 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
57596 break;
57597
57598 default:
57599 @@ -986,7 +986,7 @@ static const struct fscache_state *fscache_invalidate_object(struct fscache_obje
57600 {
57601 const struct fscache_state *s;
57602
57603 - fscache_stat(&fscache_n_invalidates_run);
57604 + fscache_stat_unchecked(&fscache_n_invalidates_run);
57605 fscache_stat(&fscache_n_cop_invalidate_object);
57606 s = _fscache_invalidate_object(object, event);
57607 fscache_stat_d(&fscache_n_cop_invalidate_object);
57608 @@ -1001,7 +1001,7 @@ static const struct fscache_state *fscache_update_object(struct fscache_object *
57609 {
57610 _enter("{OBJ%x},%d", object->debug_id, event);
57611
57612 - fscache_stat(&fscache_n_updates_run);
57613 + fscache_stat_unchecked(&fscache_n_updates_run);
57614 fscache_stat(&fscache_n_cop_update_object);
57615 object->cache->ops->update_object(object);
57616 fscache_stat_d(&fscache_n_cop_update_object);
57617 diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
57618 index 318071a..379938b 100644
57619 --- a/fs/fscache/operation.c
57620 +++ b/fs/fscache/operation.c
57621 @@ -17,7 +17,7 @@
57622 #include <linux/slab.h>
57623 #include "internal.h"
57624
57625 -atomic_t fscache_op_debug_id;
57626 +atomic_unchecked_t fscache_op_debug_id;
57627 EXPORT_SYMBOL(fscache_op_debug_id);
57628
57629 /**
57630 @@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
57631 ASSERTCMP(atomic_read(&op->usage), >, 0);
57632 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
57633
57634 - fscache_stat(&fscache_n_op_enqueue);
57635 + fscache_stat_unchecked(&fscache_n_op_enqueue);
57636 switch (op->flags & FSCACHE_OP_TYPE) {
57637 case FSCACHE_OP_ASYNC:
57638 _debug("queue async");
57639 @@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
57640 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
57641 if (op->processor)
57642 fscache_enqueue_operation(op);
57643 - fscache_stat(&fscache_n_op_run);
57644 + fscache_stat_unchecked(&fscache_n_op_run);
57645 }
57646
57647 /*
57648 @@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
57649 if (object->n_in_progress > 0) {
57650 atomic_inc(&op->usage);
57651 list_add_tail(&op->pend_link, &object->pending_ops);
57652 - fscache_stat(&fscache_n_op_pend);
57653 + fscache_stat_unchecked(&fscache_n_op_pend);
57654 } else if (!list_empty(&object->pending_ops)) {
57655 atomic_inc(&op->usage);
57656 list_add_tail(&op->pend_link, &object->pending_ops);
57657 - fscache_stat(&fscache_n_op_pend);
57658 + fscache_stat_unchecked(&fscache_n_op_pend);
57659 fscache_start_operations(object);
57660 } else {
57661 ASSERTCMP(object->n_in_progress, ==, 0);
57662 @@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
57663 object->n_exclusive++; /* reads and writes must wait */
57664 atomic_inc(&op->usage);
57665 list_add_tail(&op->pend_link, &object->pending_ops);
57666 - fscache_stat(&fscache_n_op_pend);
57667 + fscache_stat_unchecked(&fscache_n_op_pend);
57668 ret = 0;
57669 } else {
57670 /* If we're in any other state, there must have been an I/O
57671 @@ -212,11 +212,11 @@ int fscache_submit_op(struct fscache_object *object,
57672 if (object->n_exclusive > 0) {
57673 atomic_inc(&op->usage);
57674 list_add_tail(&op->pend_link, &object->pending_ops);
57675 - fscache_stat(&fscache_n_op_pend);
57676 + fscache_stat_unchecked(&fscache_n_op_pend);
57677 } else if (!list_empty(&object->pending_ops)) {
57678 atomic_inc(&op->usage);
57679 list_add_tail(&op->pend_link, &object->pending_ops);
57680 - fscache_stat(&fscache_n_op_pend);
57681 + fscache_stat_unchecked(&fscache_n_op_pend);
57682 fscache_start_operations(object);
57683 } else {
57684 ASSERTCMP(object->n_exclusive, ==, 0);
57685 @@ -228,10 +228,10 @@ int fscache_submit_op(struct fscache_object *object,
57686 object->n_ops++;
57687 atomic_inc(&op->usage);
57688 list_add_tail(&op->pend_link, &object->pending_ops);
57689 - fscache_stat(&fscache_n_op_pend);
57690 + fscache_stat_unchecked(&fscache_n_op_pend);
57691 ret = 0;
57692 } else if (fscache_object_is_dying(object)) {
57693 - fscache_stat(&fscache_n_op_rejected);
57694 + fscache_stat_unchecked(&fscache_n_op_rejected);
57695 op->state = FSCACHE_OP_ST_CANCELLED;
57696 ret = -ENOBUFS;
57697 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
57698 @@ -310,7 +310,7 @@ int fscache_cancel_op(struct fscache_operation *op,
57699 ret = -EBUSY;
57700 if (op->state == FSCACHE_OP_ST_PENDING) {
57701 ASSERT(!list_empty(&op->pend_link));
57702 - fscache_stat(&fscache_n_op_cancelled);
57703 + fscache_stat_unchecked(&fscache_n_op_cancelled);
57704 list_del_init(&op->pend_link);
57705 if (do_cancel)
57706 do_cancel(op);
57707 @@ -342,7 +342,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
57708 while (!list_empty(&object->pending_ops)) {
57709 op = list_entry(object->pending_ops.next,
57710 struct fscache_operation, pend_link);
57711 - fscache_stat(&fscache_n_op_cancelled);
57712 + fscache_stat_unchecked(&fscache_n_op_cancelled);
57713 list_del_init(&op->pend_link);
57714
57715 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
57716 @@ -414,7 +414,7 @@ void fscache_put_operation(struct fscache_operation *op)
57717 op->state, ==, FSCACHE_OP_ST_CANCELLED);
57718 op->state = FSCACHE_OP_ST_DEAD;
57719
57720 - fscache_stat(&fscache_n_op_release);
57721 + fscache_stat_unchecked(&fscache_n_op_release);
57722
57723 if (op->release) {
57724 op->release(op);
57725 @@ -433,7 +433,7 @@ void fscache_put_operation(struct fscache_operation *op)
57726 * lock, and defer it otherwise */
57727 if (!spin_trylock(&object->lock)) {
57728 _debug("defer put");
57729 - fscache_stat(&fscache_n_op_deferred_release);
57730 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
57731
57732 cache = object->cache;
57733 spin_lock(&cache->op_gc_list_lock);
57734 @@ -486,7 +486,7 @@ void fscache_operation_gc(struct work_struct *work)
57735
57736 _debug("GC DEFERRED REL OBJ%x OP%x",
57737 object->debug_id, op->debug_id);
57738 - fscache_stat(&fscache_n_op_gc);
57739 + fscache_stat_unchecked(&fscache_n_op_gc);
57740
57741 ASSERTCMP(atomic_read(&op->usage), ==, 0);
57742 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
57743 diff --git a/fs/fscache/page.c b/fs/fscache/page.c
57744 index 73899c1..ae40c58 100644
57745 --- a/fs/fscache/page.c
57746 +++ b/fs/fscache/page.c
57747 @@ -61,7 +61,7 @@ try_again:
57748 val = radix_tree_lookup(&cookie->stores, page->index);
57749 if (!val) {
57750 rcu_read_unlock();
57751 - fscache_stat(&fscache_n_store_vmscan_not_storing);
57752 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
57753 __fscache_uncache_page(cookie, page);
57754 return true;
57755 }
57756 @@ -91,11 +91,11 @@ try_again:
57757 spin_unlock(&cookie->stores_lock);
57758
57759 if (xpage) {
57760 - fscache_stat(&fscache_n_store_vmscan_cancelled);
57761 - fscache_stat(&fscache_n_store_radix_deletes);
57762 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
57763 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
57764 ASSERTCMP(xpage, ==, page);
57765 } else {
57766 - fscache_stat(&fscache_n_store_vmscan_gone);
57767 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
57768 }
57769
57770 wake_up_bit(&cookie->flags, 0);
57771 @@ -110,11 +110,11 @@ page_busy:
57772 * sleeping on memory allocation, so we may need to impose a timeout
57773 * too. */
57774 if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
57775 - fscache_stat(&fscache_n_store_vmscan_busy);
57776 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
57777 return false;
57778 }
57779
57780 - fscache_stat(&fscache_n_store_vmscan_wait);
57781 + fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
57782 __fscache_wait_on_page_write(cookie, page);
57783 gfp &= ~__GFP_WAIT;
57784 goto try_again;
57785 @@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
57786 FSCACHE_COOKIE_STORING_TAG);
57787 if (!radix_tree_tag_get(&cookie->stores, page->index,
57788 FSCACHE_COOKIE_PENDING_TAG)) {
57789 - fscache_stat(&fscache_n_store_radix_deletes);
57790 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
57791 xpage = radix_tree_delete(&cookie->stores, page->index);
57792 }
57793 spin_unlock(&cookie->stores_lock);
57794 @@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
57795
57796 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
57797
57798 - fscache_stat(&fscache_n_attr_changed_calls);
57799 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
57800
57801 if (fscache_object_is_active(object) &&
57802 fscache_use_cookie(object)) {
57803 @@ -189,11 +189,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
57804
57805 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
57806
57807 - fscache_stat(&fscache_n_attr_changed);
57808 + fscache_stat_unchecked(&fscache_n_attr_changed);
57809
57810 op = kzalloc(sizeof(*op), GFP_KERNEL);
57811 if (!op) {
57812 - fscache_stat(&fscache_n_attr_changed_nomem);
57813 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
57814 _leave(" = -ENOMEM");
57815 return -ENOMEM;
57816 }
57817 @@ -211,7 +211,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
57818 if (fscache_submit_exclusive_op(object, op) < 0)
57819 goto nobufs;
57820 spin_unlock(&cookie->lock);
57821 - fscache_stat(&fscache_n_attr_changed_ok);
57822 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
57823 fscache_put_operation(op);
57824 _leave(" = 0");
57825 return 0;
57826 @@ -219,7 +219,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
57827 nobufs:
57828 spin_unlock(&cookie->lock);
57829 kfree(op);
57830 - fscache_stat(&fscache_n_attr_changed_nobufs);
57831 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
57832 _leave(" = %d", -ENOBUFS);
57833 return -ENOBUFS;
57834 }
57835 @@ -258,7 +258,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
57836 /* allocate a retrieval operation and attempt to submit it */
57837 op = kzalloc(sizeof(*op), GFP_NOIO);
57838 if (!op) {
57839 - fscache_stat(&fscache_n_retrievals_nomem);
57840 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
57841 return NULL;
57842 }
57843
57844 @@ -289,13 +289,13 @@ int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
57845 return 0;
57846 }
57847
57848 - fscache_stat(&fscache_n_retrievals_wait);
57849 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
57850
57851 jif = jiffies;
57852 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
57853 fscache_wait_bit_interruptible,
57854 TASK_INTERRUPTIBLE) != 0) {
57855 - fscache_stat(&fscache_n_retrievals_intr);
57856 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
57857 _leave(" = -ERESTARTSYS");
57858 return -ERESTARTSYS;
57859 }
57860 @@ -324,8 +324,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
57861 */
57862 int fscache_wait_for_operation_activation(struct fscache_object *object,
57863 struct fscache_operation *op,
57864 - atomic_t *stat_op_waits,
57865 - atomic_t *stat_object_dead,
57866 + atomic_unchecked_t *stat_op_waits,
57867 + atomic_unchecked_t *stat_object_dead,
57868 void (*do_cancel)(struct fscache_operation *))
57869 {
57870 int ret;
57871 @@ -335,7 +335,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
57872
57873 _debug(">>> WT");
57874 if (stat_op_waits)
57875 - fscache_stat(stat_op_waits);
57876 + fscache_stat_unchecked(stat_op_waits);
57877 if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
57878 fscache_wait_bit_interruptible,
57879 TASK_INTERRUPTIBLE) != 0) {
57880 @@ -353,7 +353,7 @@ int fscache_wait_for_operation_activation(struct fscache_object *object,
57881 check_if_dead:
57882 if (op->state == FSCACHE_OP_ST_CANCELLED) {
57883 if (stat_object_dead)
57884 - fscache_stat(stat_object_dead);
57885 + fscache_stat_unchecked(stat_object_dead);
57886 _leave(" = -ENOBUFS [cancelled]");
57887 return -ENOBUFS;
57888 }
57889 @@ -361,7 +361,7 @@ check_if_dead:
57890 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->state);
57891 fscache_cancel_op(op, do_cancel);
57892 if (stat_object_dead)
57893 - fscache_stat(stat_object_dead);
57894 + fscache_stat_unchecked(stat_object_dead);
57895 return -ENOBUFS;
57896 }
57897 return 0;
57898 @@ -388,7 +388,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
57899
57900 _enter("%p,%p,,,", cookie, page);
57901
57902 - fscache_stat(&fscache_n_retrievals);
57903 + fscache_stat_unchecked(&fscache_n_retrievals);
57904
57905 if (hlist_empty(&cookie->backing_objects))
57906 goto nobufs;
57907 @@ -428,7 +428,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
57908 goto nobufs_unlock_dec;
57909 spin_unlock(&cookie->lock);
57910
57911 - fscache_stat(&fscache_n_retrieval_ops);
57912 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
57913
57914 /* pin the netfs read context in case we need to do the actual netfs
57915 * read because we've encountered a cache read failure */
57916 @@ -459,15 +459,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
57917
57918 error:
57919 if (ret == -ENOMEM)
57920 - fscache_stat(&fscache_n_retrievals_nomem);
57921 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
57922 else if (ret == -ERESTARTSYS)
57923 - fscache_stat(&fscache_n_retrievals_intr);
57924 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
57925 else if (ret == -ENODATA)
57926 - fscache_stat(&fscache_n_retrievals_nodata);
57927 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
57928 else if (ret < 0)
57929 - fscache_stat(&fscache_n_retrievals_nobufs);
57930 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
57931 else
57932 - fscache_stat(&fscache_n_retrievals_ok);
57933 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
57934
57935 fscache_put_retrieval(op);
57936 _leave(" = %d", ret);
57937 @@ -480,7 +480,7 @@ nobufs_unlock:
57938 atomic_dec(&cookie->n_active);
57939 kfree(op);
57940 nobufs:
57941 - fscache_stat(&fscache_n_retrievals_nobufs);
57942 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
57943 _leave(" = -ENOBUFS");
57944 return -ENOBUFS;
57945 }
57946 @@ -518,7 +518,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
57947
57948 _enter("%p,,%d,,,", cookie, *nr_pages);
57949
57950 - fscache_stat(&fscache_n_retrievals);
57951 + fscache_stat_unchecked(&fscache_n_retrievals);
57952
57953 if (hlist_empty(&cookie->backing_objects))
57954 goto nobufs;
57955 @@ -554,7 +554,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
57956 goto nobufs_unlock_dec;
57957 spin_unlock(&cookie->lock);
57958
57959 - fscache_stat(&fscache_n_retrieval_ops);
57960 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
57961
57962 /* pin the netfs read context in case we need to do the actual netfs
57963 * read because we've encountered a cache read failure */
57964 @@ -585,15 +585,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
57965
57966 error:
57967 if (ret == -ENOMEM)
57968 - fscache_stat(&fscache_n_retrievals_nomem);
57969 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
57970 else if (ret == -ERESTARTSYS)
57971 - fscache_stat(&fscache_n_retrievals_intr);
57972 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
57973 else if (ret == -ENODATA)
57974 - fscache_stat(&fscache_n_retrievals_nodata);
57975 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
57976 else if (ret < 0)
57977 - fscache_stat(&fscache_n_retrievals_nobufs);
57978 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
57979 else
57980 - fscache_stat(&fscache_n_retrievals_ok);
57981 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
57982
57983 fscache_put_retrieval(op);
57984 _leave(" = %d", ret);
57985 @@ -606,7 +606,7 @@ nobufs_unlock:
57986 atomic_dec(&cookie->n_active);
57987 kfree(op);
57988 nobufs:
57989 - fscache_stat(&fscache_n_retrievals_nobufs);
57990 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
57991 _leave(" = -ENOBUFS");
57992 return -ENOBUFS;
57993 }
57994 @@ -630,7 +630,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
57995
57996 _enter("%p,%p,,,", cookie, page);
57997
57998 - fscache_stat(&fscache_n_allocs);
57999 + fscache_stat_unchecked(&fscache_n_allocs);
58000
58001 if (hlist_empty(&cookie->backing_objects))
58002 goto nobufs;
58003 @@ -662,7 +662,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
58004 goto nobufs_unlock;
58005 spin_unlock(&cookie->lock);
58006
58007 - fscache_stat(&fscache_n_alloc_ops);
58008 + fscache_stat_unchecked(&fscache_n_alloc_ops);
58009
58010 ret = fscache_wait_for_operation_activation(
58011 object, &op->op,
58012 @@ -679,11 +679,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
58013
58014 error:
58015 if (ret == -ERESTARTSYS)
58016 - fscache_stat(&fscache_n_allocs_intr);
58017 + fscache_stat_unchecked(&fscache_n_allocs_intr);
58018 else if (ret < 0)
58019 - fscache_stat(&fscache_n_allocs_nobufs);
58020 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
58021 else
58022 - fscache_stat(&fscache_n_allocs_ok);
58023 + fscache_stat_unchecked(&fscache_n_allocs_ok);
58024
58025 fscache_put_retrieval(op);
58026 _leave(" = %d", ret);
58027 @@ -694,7 +694,7 @@ nobufs_unlock:
58028 atomic_dec(&cookie->n_active);
58029 kfree(op);
58030 nobufs:
58031 - fscache_stat(&fscache_n_allocs_nobufs);
58032 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
58033 _leave(" = -ENOBUFS");
58034 return -ENOBUFS;
58035 }
58036 @@ -770,7 +770,7 @@ static void fscache_write_op(struct fscache_operation *_op)
58037
58038 spin_lock(&cookie->stores_lock);
58039
58040 - fscache_stat(&fscache_n_store_calls);
58041 + fscache_stat_unchecked(&fscache_n_store_calls);
58042
58043 /* find a page to store */
58044 page = NULL;
58045 @@ -781,7 +781,7 @@ static void fscache_write_op(struct fscache_operation *_op)
58046 page = results[0];
58047 _debug("gang %d [%lx]", n, page->index);
58048 if (page->index > op->store_limit) {
58049 - fscache_stat(&fscache_n_store_pages_over_limit);
58050 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
58051 goto superseded;
58052 }
58053
58054 @@ -793,7 +793,7 @@ static void fscache_write_op(struct fscache_operation *_op)
58055 spin_unlock(&cookie->stores_lock);
58056 spin_unlock(&object->lock);
58057
58058 - fscache_stat(&fscache_n_store_pages);
58059 + fscache_stat_unchecked(&fscache_n_store_pages);
58060 fscache_stat(&fscache_n_cop_write_page);
58061 ret = object->cache->ops->write_page(op, page);
58062 fscache_stat_d(&fscache_n_cop_write_page);
58063 @@ -896,7 +896,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
58064 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
58065 ASSERT(PageFsCache(page));
58066
58067 - fscache_stat(&fscache_n_stores);
58068 + fscache_stat_unchecked(&fscache_n_stores);
58069
58070 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
58071 _leave(" = -ENOBUFS [invalidating]");
58072 @@ -954,7 +954,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
58073 spin_unlock(&cookie->stores_lock);
58074 spin_unlock(&object->lock);
58075
58076 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
58077 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58078 op->store_limit = object->store_limit;
58079
58080 atomic_inc(&cookie->n_active);
58081 @@ -963,8 +963,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
58082
58083 spin_unlock(&cookie->lock);
58084 radix_tree_preload_end();
58085 - fscache_stat(&fscache_n_store_ops);
58086 - fscache_stat(&fscache_n_stores_ok);
58087 + fscache_stat_unchecked(&fscache_n_store_ops);
58088 + fscache_stat_unchecked(&fscache_n_stores_ok);
58089
58090 /* the work queue now carries its own ref on the object */
58091 fscache_put_operation(&op->op);
58092 @@ -972,14 +972,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
58093 return 0;
58094
58095 already_queued:
58096 - fscache_stat(&fscache_n_stores_again);
58097 + fscache_stat_unchecked(&fscache_n_stores_again);
58098 already_pending:
58099 spin_unlock(&cookie->stores_lock);
58100 spin_unlock(&object->lock);
58101 spin_unlock(&cookie->lock);
58102 radix_tree_preload_end();
58103 kfree(op);
58104 - fscache_stat(&fscache_n_stores_ok);
58105 + fscache_stat_unchecked(&fscache_n_stores_ok);
58106 _leave(" = 0");
58107 return 0;
58108
58109 @@ -999,14 +999,14 @@ nobufs:
58110 spin_unlock(&cookie->lock);
58111 radix_tree_preload_end();
58112 kfree(op);
58113 - fscache_stat(&fscache_n_stores_nobufs);
58114 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
58115 _leave(" = -ENOBUFS");
58116 return -ENOBUFS;
58117
58118 nomem_free:
58119 kfree(op);
58120 nomem:
58121 - fscache_stat(&fscache_n_stores_oom);
58122 + fscache_stat_unchecked(&fscache_n_stores_oom);
58123 _leave(" = -ENOMEM");
58124 return -ENOMEM;
58125 }
58126 @@ -1024,7 +1024,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
58127 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
58128 ASSERTCMP(page, !=, NULL);
58129
58130 - fscache_stat(&fscache_n_uncaches);
58131 + fscache_stat_unchecked(&fscache_n_uncaches);
58132
58133 /* cache withdrawal may beat us to it */
58134 if (!PageFsCache(page))
58135 @@ -1075,7 +1075,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
58136 struct fscache_cookie *cookie = op->op.object->cookie;
58137
58138 #ifdef CONFIG_FSCACHE_STATS
58139 - atomic_inc(&fscache_n_marks);
58140 + atomic_inc_unchecked(&fscache_n_marks);
58141 #endif
58142
58143 _debug("- mark %p{%lx}", page, page->index);
58144 diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
58145 index 40d13c7..ddf52b9 100644
58146 --- a/fs/fscache/stats.c
58147 +++ b/fs/fscache/stats.c
58148 @@ -18,99 +18,99 @@
58149 /*
58150 * operation counters
58151 */
58152 -atomic_t fscache_n_op_pend;
58153 -atomic_t fscache_n_op_run;
58154 -atomic_t fscache_n_op_enqueue;
58155 -atomic_t fscache_n_op_requeue;
58156 -atomic_t fscache_n_op_deferred_release;
58157 -atomic_t fscache_n_op_release;
58158 -atomic_t fscache_n_op_gc;
58159 -atomic_t fscache_n_op_cancelled;
58160 -atomic_t fscache_n_op_rejected;
58161 +atomic_unchecked_t fscache_n_op_pend;
58162 +atomic_unchecked_t fscache_n_op_run;
58163 +atomic_unchecked_t fscache_n_op_enqueue;
58164 +atomic_unchecked_t fscache_n_op_requeue;
58165 +atomic_unchecked_t fscache_n_op_deferred_release;
58166 +atomic_unchecked_t fscache_n_op_release;
58167 +atomic_unchecked_t fscache_n_op_gc;
58168 +atomic_unchecked_t fscache_n_op_cancelled;
58169 +atomic_unchecked_t fscache_n_op_rejected;
58170
58171 -atomic_t fscache_n_attr_changed;
58172 -atomic_t fscache_n_attr_changed_ok;
58173 -atomic_t fscache_n_attr_changed_nobufs;
58174 -atomic_t fscache_n_attr_changed_nomem;
58175 -atomic_t fscache_n_attr_changed_calls;
58176 +atomic_unchecked_t fscache_n_attr_changed;
58177 +atomic_unchecked_t fscache_n_attr_changed_ok;
58178 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
58179 +atomic_unchecked_t fscache_n_attr_changed_nomem;
58180 +atomic_unchecked_t fscache_n_attr_changed_calls;
58181
58182 -atomic_t fscache_n_allocs;
58183 -atomic_t fscache_n_allocs_ok;
58184 -atomic_t fscache_n_allocs_wait;
58185 -atomic_t fscache_n_allocs_nobufs;
58186 -atomic_t fscache_n_allocs_intr;
58187 -atomic_t fscache_n_allocs_object_dead;
58188 -atomic_t fscache_n_alloc_ops;
58189 -atomic_t fscache_n_alloc_op_waits;
58190 +atomic_unchecked_t fscache_n_allocs;
58191 +atomic_unchecked_t fscache_n_allocs_ok;
58192 +atomic_unchecked_t fscache_n_allocs_wait;
58193 +atomic_unchecked_t fscache_n_allocs_nobufs;
58194 +atomic_unchecked_t fscache_n_allocs_intr;
58195 +atomic_unchecked_t fscache_n_allocs_object_dead;
58196 +atomic_unchecked_t fscache_n_alloc_ops;
58197 +atomic_unchecked_t fscache_n_alloc_op_waits;
58198
58199 -atomic_t fscache_n_retrievals;
58200 -atomic_t fscache_n_retrievals_ok;
58201 -atomic_t fscache_n_retrievals_wait;
58202 -atomic_t fscache_n_retrievals_nodata;
58203 -atomic_t fscache_n_retrievals_nobufs;
58204 -atomic_t fscache_n_retrievals_intr;
58205 -atomic_t fscache_n_retrievals_nomem;
58206 -atomic_t fscache_n_retrievals_object_dead;
58207 -atomic_t fscache_n_retrieval_ops;
58208 -atomic_t fscache_n_retrieval_op_waits;
58209 +atomic_unchecked_t fscache_n_retrievals;
58210 +atomic_unchecked_t fscache_n_retrievals_ok;
58211 +atomic_unchecked_t fscache_n_retrievals_wait;
58212 +atomic_unchecked_t fscache_n_retrievals_nodata;
58213 +atomic_unchecked_t fscache_n_retrievals_nobufs;
58214 +atomic_unchecked_t fscache_n_retrievals_intr;
58215 +atomic_unchecked_t fscache_n_retrievals_nomem;
58216 +atomic_unchecked_t fscache_n_retrievals_object_dead;
58217 +atomic_unchecked_t fscache_n_retrieval_ops;
58218 +atomic_unchecked_t fscache_n_retrieval_op_waits;
58219
58220 -atomic_t fscache_n_stores;
58221 -atomic_t fscache_n_stores_ok;
58222 -atomic_t fscache_n_stores_again;
58223 -atomic_t fscache_n_stores_nobufs;
58224 -atomic_t fscache_n_stores_oom;
58225 -atomic_t fscache_n_store_ops;
58226 -atomic_t fscache_n_store_calls;
58227 -atomic_t fscache_n_store_pages;
58228 -atomic_t fscache_n_store_radix_deletes;
58229 -atomic_t fscache_n_store_pages_over_limit;
58230 +atomic_unchecked_t fscache_n_stores;
58231 +atomic_unchecked_t fscache_n_stores_ok;
58232 +atomic_unchecked_t fscache_n_stores_again;
58233 +atomic_unchecked_t fscache_n_stores_nobufs;
58234 +atomic_unchecked_t fscache_n_stores_oom;
58235 +atomic_unchecked_t fscache_n_store_ops;
58236 +atomic_unchecked_t fscache_n_store_calls;
58237 +atomic_unchecked_t fscache_n_store_pages;
58238 +atomic_unchecked_t fscache_n_store_radix_deletes;
58239 +atomic_unchecked_t fscache_n_store_pages_over_limit;
58240
58241 -atomic_t fscache_n_store_vmscan_not_storing;
58242 -atomic_t fscache_n_store_vmscan_gone;
58243 -atomic_t fscache_n_store_vmscan_busy;
58244 -atomic_t fscache_n_store_vmscan_cancelled;
58245 -atomic_t fscache_n_store_vmscan_wait;
58246 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
58247 +atomic_unchecked_t fscache_n_store_vmscan_gone;
58248 +atomic_unchecked_t fscache_n_store_vmscan_busy;
58249 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
58250 +atomic_unchecked_t fscache_n_store_vmscan_wait;
58251
58252 -atomic_t fscache_n_marks;
58253 -atomic_t fscache_n_uncaches;
58254 +atomic_unchecked_t fscache_n_marks;
58255 +atomic_unchecked_t fscache_n_uncaches;
58256
58257 -atomic_t fscache_n_acquires;
58258 -atomic_t fscache_n_acquires_null;
58259 -atomic_t fscache_n_acquires_no_cache;
58260 -atomic_t fscache_n_acquires_ok;
58261 -atomic_t fscache_n_acquires_nobufs;
58262 -atomic_t fscache_n_acquires_oom;
58263 +atomic_unchecked_t fscache_n_acquires;
58264 +atomic_unchecked_t fscache_n_acquires_null;
58265 +atomic_unchecked_t fscache_n_acquires_no_cache;
58266 +atomic_unchecked_t fscache_n_acquires_ok;
58267 +atomic_unchecked_t fscache_n_acquires_nobufs;
58268 +atomic_unchecked_t fscache_n_acquires_oom;
58269
58270 -atomic_t fscache_n_invalidates;
58271 -atomic_t fscache_n_invalidates_run;
58272 +atomic_unchecked_t fscache_n_invalidates;
58273 +atomic_unchecked_t fscache_n_invalidates_run;
58274
58275 -atomic_t fscache_n_updates;
58276 -atomic_t fscache_n_updates_null;
58277 -atomic_t fscache_n_updates_run;
58278 +atomic_unchecked_t fscache_n_updates;
58279 +atomic_unchecked_t fscache_n_updates_null;
58280 +atomic_unchecked_t fscache_n_updates_run;
58281
58282 -atomic_t fscache_n_relinquishes;
58283 -atomic_t fscache_n_relinquishes_null;
58284 -atomic_t fscache_n_relinquishes_waitcrt;
58285 -atomic_t fscache_n_relinquishes_retire;
58286 +atomic_unchecked_t fscache_n_relinquishes;
58287 +atomic_unchecked_t fscache_n_relinquishes_null;
58288 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
58289 +atomic_unchecked_t fscache_n_relinquishes_retire;
58290
58291 -atomic_t fscache_n_cookie_index;
58292 -atomic_t fscache_n_cookie_data;
58293 -atomic_t fscache_n_cookie_special;
58294 +atomic_unchecked_t fscache_n_cookie_index;
58295 +atomic_unchecked_t fscache_n_cookie_data;
58296 +atomic_unchecked_t fscache_n_cookie_special;
58297
58298 -atomic_t fscache_n_object_alloc;
58299 -atomic_t fscache_n_object_no_alloc;
58300 -atomic_t fscache_n_object_lookups;
58301 -atomic_t fscache_n_object_lookups_negative;
58302 -atomic_t fscache_n_object_lookups_positive;
58303 -atomic_t fscache_n_object_lookups_timed_out;
58304 -atomic_t fscache_n_object_created;
58305 -atomic_t fscache_n_object_avail;
58306 -atomic_t fscache_n_object_dead;
58307 +atomic_unchecked_t fscache_n_object_alloc;
58308 +atomic_unchecked_t fscache_n_object_no_alloc;
58309 +atomic_unchecked_t fscache_n_object_lookups;
58310 +atomic_unchecked_t fscache_n_object_lookups_negative;
58311 +atomic_unchecked_t fscache_n_object_lookups_positive;
58312 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
58313 +atomic_unchecked_t fscache_n_object_created;
58314 +atomic_unchecked_t fscache_n_object_avail;
58315 +atomic_unchecked_t fscache_n_object_dead;
58316
58317 -atomic_t fscache_n_checkaux_none;
58318 -atomic_t fscache_n_checkaux_okay;
58319 -atomic_t fscache_n_checkaux_update;
58320 -atomic_t fscache_n_checkaux_obsolete;
58321 +atomic_unchecked_t fscache_n_checkaux_none;
58322 +atomic_unchecked_t fscache_n_checkaux_okay;
58323 +atomic_unchecked_t fscache_n_checkaux_update;
58324 +atomic_unchecked_t fscache_n_checkaux_obsolete;
58325
58326 atomic_t fscache_n_cop_alloc_object;
58327 atomic_t fscache_n_cop_lookup_object;
58328 @@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
58329 seq_puts(m, "FS-Cache statistics\n");
58330
58331 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
58332 - atomic_read(&fscache_n_cookie_index),
58333 - atomic_read(&fscache_n_cookie_data),
58334 - atomic_read(&fscache_n_cookie_special));
58335 + atomic_read_unchecked(&fscache_n_cookie_index),
58336 + atomic_read_unchecked(&fscache_n_cookie_data),
58337 + atomic_read_unchecked(&fscache_n_cookie_special));
58338
58339 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
58340 - atomic_read(&fscache_n_object_alloc),
58341 - atomic_read(&fscache_n_object_no_alloc),
58342 - atomic_read(&fscache_n_object_avail),
58343 - atomic_read(&fscache_n_object_dead));
58344 + atomic_read_unchecked(&fscache_n_object_alloc),
58345 + atomic_read_unchecked(&fscache_n_object_no_alloc),
58346 + atomic_read_unchecked(&fscache_n_object_avail),
58347 + atomic_read_unchecked(&fscache_n_object_dead));
58348 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
58349 - atomic_read(&fscache_n_checkaux_none),
58350 - atomic_read(&fscache_n_checkaux_okay),
58351 - atomic_read(&fscache_n_checkaux_update),
58352 - atomic_read(&fscache_n_checkaux_obsolete));
58353 + atomic_read_unchecked(&fscache_n_checkaux_none),
58354 + atomic_read_unchecked(&fscache_n_checkaux_okay),
58355 + atomic_read_unchecked(&fscache_n_checkaux_update),
58356 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
58357
58358 seq_printf(m, "Pages : mrk=%u unc=%u\n",
58359 - atomic_read(&fscache_n_marks),
58360 - atomic_read(&fscache_n_uncaches));
58361 + atomic_read_unchecked(&fscache_n_marks),
58362 + atomic_read_unchecked(&fscache_n_uncaches));
58363
58364 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
58365 " oom=%u\n",
58366 - atomic_read(&fscache_n_acquires),
58367 - atomic_read(&fscache_n_acquires_null),
58368 - atomic_read(&fscache_n_acquires_no_cache),
58369 - atomic_read(&fscache_n_acquires_ok),
58370 - atomic_read(&fscache_n_acquires_nobufs),
58371 - atomic_read(&fscache_n_acquires_oom));
58372 + atomic_read_unchecked(&fscache_n_acquires),
58373 + atomic_read_unchecked(&fscache_n_acquires_null),
58374 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
58375 + atomic_read_unchecked(&fscache_n_acquires_ok),
58376 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
58377 + atomic_read_unchecked(&fscache_n_acquires_oom));
58378
58379 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
58380 - atomic_read(&fscache_n_object_lookups),
58381 - atomic_read(&fscache_n_object_lookups_negative),
58382 - atomic_read(&fscache_n_object_lookups_positive),
58383 - atomic_read(&fscache_n_object_created),
58384 - atomic_read(&fscache_n_object_lookups_timed_out));
58385 + atomic_read_unchecked(&fscache_n_object_lookups),
58386 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
58387 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
58388 + atomic_read_unchecked(&fscache_n_object_created),
58389 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
58390
58391 seq_printf(m, "Invals : n=%u run=%u\n",
58392 - atomic_read(&fscache_n_invalidates),
58393 - atomic_read(&fscache_n_invalidates_run));
58394 + atomic_read_unchecked(&fscache_n_invalidates),
58395 + atomic_read_unchecked(&fscache_n_invalidates_run));
58396
58397 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
58398 - atomic_read(&fscache_n_updates),
58399 - atomic_read(&fscache_n_updates_null),
58400 - atomic_read(&fscache_n_updates_run));
58401 + atomic_read_unchecked(&fscache_n_updates),
58402 + atomic_read_unchecked(&fscache_n_updates_null),
58403 + atomic_read_unchecked(&fscache_n_updates_run));
58404
58405 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
58406 - atomic_read(&fscache_n_relinquishes),
58407 - atomic_read(&fscache_n_relinquishes_null),
58408 - atomic_read(&fscache_n_relinquishes_waitcrt),
58409 - atomic_read(&fscache_n_relinquishes_retire));
58410 + atomic_read_unchecked(&fscache_n_relinquishes),
58411 + atomic_read_unchecked(&fscache_n_relinquishes_null),
58412 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
58413 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
58414
58415 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
58416 - atomic_read(&fscache_n_attr_changed),
58417 - atomic_read(&fscache_n_attr_changed_ok),
58418 - atomic_read(&fscache_n_attr_changed_nobufs),
58419 - atomic_read(&fscache_n_attr_changed_nomem),
58420 - atomic_read(&fscache_n_attr_changed_calls));
58421 + atomic_read_unchecked(&fscache_n_attr_changed),
58422 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
58423 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
58424 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
58425 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
58426
58427 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
58428 - atomic_read(&fscache_n_allocs),
58429 - atomic_read(&fscache_n_allocs_ok),
58430 - atomic_read(&fscache_n_allocs_wait),
58431 - atomic_read(&fscache_n_allocs_nobufs),
58432 - atomic_read(&fscache_n_allocs_intr));
58433 + atomic_read_unchecked(&fscache_n_allocs),
58434 + atomic_read_unchecked(&fscache_n_allocs_ok),
58435 + atomic_read_unchecked(&fscache_n_allocs_wait),
58436 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
58437 + atomic_read_unchecked(&fscache_n_allocs_intr));
58438 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
58439 - atomic_read(&fscache_n_alloc_ops),
58440 - atomic_read(&fscache_n_alloc_op_waits),
58441 - atomic_read(&fscache_n_allocs_object_dead));
58442 + atomic_read_unchecked(&fscache_n_alloc_ops),
58443 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
58444 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
58445
58446 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
58447 " int=%u oom=%u\n",
58448 - atomic_read(&fscache_n_retrievals),
58449 - atomic_read(&fscache_n_retrievals_ok),
58450 - atomic_read(&fscache_n_retrievals_wait),
58451 - atomic_read(&fscache_n_retrievals_nodata),
58452 - atomic_read(&fscache_n_retrievals_nobufs),
58453 - atomic_read(&fscache_n_retrievals_intr),
58454 - atomic_read(&fscache_n_retrievals_nomem));
58455 + atomic_read_unchecked(&fscache_n_retrievals),
58456 + atomic_read_unchecked(&fscache_n_retrievals_ok),
58457 + atomic_read_unchecked(&fscache_n_retrievals_wait),
58458 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
58459 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
58460 + atomic_read_unchecked(&fscache_n_retrievals_intr),
58461 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
58462 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
58463 - atomic_read(&fscache_n_retrieval_ops),
58464 - atomic_read(&fscache_n_retrieval_op_waits),
58465 - atomic_read(&fscache_n_retrievals_object_dead));
58466 + atomic_read_unchecked(&fscache_n_retrieval_ops),
58467 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
58468 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
58469
58470 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
58471 - atomic_read(&fscache_n_stores),
58472 - atomic_read(&fscache_n_stores_ok),
58473 - atomic_read(&fscache_n_stores_again),
58474 - atomic_read(&fscache_n_stores_nobufs),
58475 - atomic_read(&fscache_n_stores_oom));
58476 + atomic_read_unchecked(&fscache_n_stores),
58477 + atomic_read_unchecked(&fscache_n_stores_ok),
58478 + atomic_read_unchecked(&fscache_n_stores_again),
58479 + atomic_read_unchecked(&fscache_n_stores_nobufs),
58480 + atomic_read_unchecked(&fscache_n_stores_oom));
58481 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
58482 - atomic_read(&fscache_n_store_ops),
58483 - atomic_read(&fscache_n_store_calls),
58484 - atomic_read(&fscache_n_store_pages),
58485 - atomic_read(&fscache_n_store_radix_deletes),
58486 - atomic_read(&fscache_n_store_pages_over_limit));
58487 + atomic_read_unchecked(&fscache_n_store_ops),
58488 + atomic_read_unchecked(&fscache_n_store_calls),
58489 + atomic_read_unchecked(&fscache_n_store_pages),
58490 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
58491 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
58492
58493 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
58494 - atomic_read(&fscache_n_store_vmscan_not_storing),
58495 - atomic_read(&fscache_n_store_vmscan_gone),
58496 - atomic_read(&fscache_n_store_vmscan_busy),
58497 - atomic_read(&fscache_n_store_vmscan_cancelled),
58498 - atomic_read(&fscache_n_store_vmscan_wait));
58499 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
58500 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
58501 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
58502 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
58503 + atomic_read_unchecked(&fscache_n_store_vmscan_wait));
58504
58505 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
58506 - atomic_read(&fscache_n_op_pend),
58507 - atomic_read(&fscache_n_op_run),
58508 - atomic_read(&fscache_n_op_enqueue),
58509 - atomic_read(&fscache_n_op_cancelled),
58510 - atomic_read(&fscache_n_op_rejected));
58511 + atomic_read_unchecked(&fscache_n_op_pend),
58512 + atomic_read_unchecked(&fscache_n_op_run),
58513 + atomic_read_unchecked(&fscache_n_op_enqueue),
58514 + atomic_read_unchecked(&fscache_n_op_cancelled),
58515 + atomic_read_unchecked(&fscache_n_op_rejected));
58516 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
58517 - atomic_read(&fscache_n_op_deferred_release),
58518 - atomic_read(&fscache_n_op_release),
58519 - atomic_read(&fscache_n_op_gc));
58520 + atomic_read_unchecked(&fscache_n_op_deferred_release),
58521 + atomic_read_unchecked(&fscache_n_op_release),
58522 + atomic_read_unchecked(&fscache_n_op_gc));
58523
58524 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
58525 atomic_read(&fscache_n_cop_alloc_object),
58526 diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
58527 index adbfd66..4b25822 100644
58528 --- a/fs/fuse/cuse.c
58529 +++ b/fs/fuse/cuse.c
58530 @@ -603,10 +603,12 @@ static int __init cuse_init(void)
58531 INIT_LIST_HEAD(&cuse_conntbl[i]);
58532
58533 /* inherit and extend fuse_dev_operations */
58534 - cuse_channel_fops = fuse_dev_operations;
58535 - cuse_channel_fops.owner = THIS_MODULE;
58536 - cuse_channel_fops.open = cuse_channel_open;
58537 - cuse_channel_fops.release = cuse_channel_release;
58538 + pax_open_kernel();
58539 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
58540 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
58541 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
58542 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
58543 + pax_close_kernel();
58544
58545 cuse_class = class_create(THIS_MODULE, "cuse");
58546 if (IS_ERR(cuse_class))
58547 diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
58548 index ef74ad5..c9ac759e 100644
58549 --- a/fs/fuse/dev.c
58550 +++ b/fs/fuse/dev.c
58551 @@ -1339,7 +1339,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
58552 ret = 0;
58553 pipe_lock(pipe);
58554
58555 - if (!pipe->readers) {
58556 + if (!atomic_read(&pipe->readers)) {
58557 send_sig(SIGPIPE, current, 0);
58558 if (!ret)
58559 ret = -EPIPE;
58560 @@ -1364,7 +1364,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
58561 page_nr++;
58562 ret += buf->len;
58563
58564 - if (pipe->files)
58565 + if (atomic_read(&pipe->files))
58566 do_wakeup = 1;
58567 }
58568
58569 diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
58570 index b7989f2..1f72ec4 100644
58571 --- a/fs/fuse/dir.c
58572 +++ b/fs/fuse/dir.c
58573 @@ -1438,7 +1438,7 @@ static char *read_link(struct dentry *dentry)
58574 return link;
58575 }
58576
58577 -static void free_link(char *link)
58578 +static void free_link(const char *link)
58579 {
58580 if (!IS_ERR(link))
58581 free_page((unsigned long) link);
58582 diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
58583 index 1298766..c964c60 100644
58584 --- a/fs/gfs2/inode.c
58585 +++ b/fs/gfs2/inode.c
58586 @@ -1515,7 +1515,7 @@ out:
58587
58588 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
58589 {
58590 - char *s = nd_get_link(nd);
58591 + const char *s = nd_get_link(nd);
58592 if (!IS_ERR(s))
58593 kfree(s);
58594 }
58595 diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
58596 index 2543728..14d7bd4 100644
58597 --- a/fs/hostfs/hostfs_kern.c
58598 +++ b/fs/hostfs/hostfs_kern.c
58599 @@ -904,7 +904,7 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
58600
58601 static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
58602 {
58603 - char *s = nd_get_link(nd);
58604 + const char *s = nd_get_link(nd);
58605 if (!IS_ERR(s))
58606 __putname(s);
58607 }
58608 diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
58609 index d19b30a..ef89c36 100644
58610 --- a/fs/hugetlbfs/inode.c
58611 +++ b/fs/hugetlbfs/inode.c
58612 @@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
58613 struct mm_struct *mm = current->mm;
58614 struct vm_area_struct *vma;
58615 struct hstate *h = hstate_file(file);
58616 + unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
58617 struct vm_unmapped_area_info info;
58618
58619 if (len & ~huge_page_mask(h))
58620 @@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
58621 return addr;
58622 }
58623
58624 +#ifdef CONFIG_PAX_RANDMMAP
58625 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
58626 +#endif
58627 +
58628 if (addr) {
58629 addr = ALIGN(addr, huge_page_size(h));
58630 vma = find_vma(mm, addr);
58631 - if (TASK_SIZE - len >= addr &&
58632 - (!vma || addr + len <= vma->vm_start))
58633 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
58634 return addr;
58635 }
58636
58637 info.flags = 0;
58638 info.length = len;
58639 info.low_limit = TASK_UNMAPPED_BASE;
58640 +
58641 +#ifdef CONFIG_PAX_RANDMMAP
58642 + if (mm->pax_flags & MF_PAX_RANDMMAP)
58643 + info.low_limit += mm->delta_mmap;
58644 +#endif
58645 +
58646 info.high_limit = TASK_SIZE;
58647 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
58648 info.align_offset = 0;
58649 @@ -908,7 +918,7 @@ static struct file_system_type hugetlbfs_fs_type = {
58650 };
58651 MODULE_ALIAS_FS("hugetlbfs");
58652
58653 -static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
58654 +struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
58655
58656 static int can_do_hugetlb_shm(void)
58657 {
58658 diff --git a/fs/inode.c b/fs/inode.c
58659 index b33ba8e..3c79a47 100644
58660 --- a/fs/inode.c
58661 +++ b/fs/inode.c
58662 @@ -849,8 +849,8 @@ unsigned int get_next_ino(void)
58663
58664 #ifdef CONFIG_SMP
58665 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
58666 - static atomic_t shared_last_ino;
58667 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
58668 + static atomic_unchecked_t shared_last_ino;
58669 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
58670
58671 res = next - LAST_INO_BATCH;
58672 }
58673 diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
58674 index 4a6cf28..d3a29d3 100644
58675 --- a/fs/jffs2/erase.c
58676 +++ b/fs/jffs2/erase.c
58677 @@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
58678 struct jffs2_unknown_node marker = {
58679 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
58680 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
58681 - .totlen = cpu_to_je32(c->cleanmarker_size)
58682 + .totlen = cpu_to_je32(c->cleanmarker_size),
58683 + .hdr_crc = cpu_to_je32(0)
58684 };
58685
58686 jffs2_prealloc_raw_node_refs(c, jeb, 1);
58687 diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
58688 index a6597d6..41b30ec 100644
58689 --- a/fs/jffs2/wbuf.c
58690 +++ b/fs/jffs2/wbuf.c
58691 @@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
58692 {
58693 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
58694 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
58695 - .totlen = constant_cpu_to_je32(8)
58696 + .totlen = constant_cpu_to_je32(8),
58697 + .hdr_crc = constant_cpu_to_je32(0)
58698 };
58699
58700 /*
58701 diff --git a/fs/jfs/super.c b/fs/jfs/super.c
58702 index 6669aa2..36b033d 100644
58703 --- a/fs/jfs/super.c
58704 +++ b/fs/jfs/super.c
58705 @@ -882,7 +882,7 @@ static int __init init_jfs_fs(void)
58706
58707 jfs_inode_cachep =
58708 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
58709 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
58710 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
58711 init_once);
58712 if (jfs_inode_cachep == NULL)
58713 return -ENOMEM;
58714 diff --git a/fs/libfs.c b/fs/libfs.c
58715 index 193e0c2..7404665 100644
58716 --- a/fs/libfs.c
58717 +++ b/fs/libfs.c
58718 @@ -150,6 +150,9 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
58719
58720 for (p = q->next; p != &dentry->d_subdirs; p = p->next) {
58721 struct dentry *next = list_entry(p, struct dentry, d_u.d_child);
58722 + char d_name[sizeof(next->d_iname)];
58723 + const unsigned char *name;
58724 +
58725 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
58726 if (!simple_positive(next)) {
58727 spin_unlock(&next->d_lock);
58728 @@ -158,7 +161,12 @@ int dcache_readdir(struct file *file, struct dir_context *ctx)
58729
58730 spin_unlock(&next->d_lock);
58731 spin_unlock(&dentry->d_lock);
58732 - if (!dir_emit(ctx, next->d_name.name, next->d_name.len,
58733 + name = next->d_name.name;
58734 + if (name == next->d_iname) {
58735 + memcpy(d_name, name, next->d_name.len);
58736 + name = d_name;
58737 + }
58738 + if (!dir_emit(ctx, name, next->d_name.len,
58739 next->d_inode->i_ino, dt_type(next->d_inode)))
58740 return 0;
58741 spin_lock(&dentry->d_lock);
58742 diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
58743 index acd3947..1f896e2 100644
58744 --- a/fs/lockd/clntproc.c
58745 +++ b/fs/lockd/clntproc.c
58746 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
58747 /*
58748 * Cookie counter for NLM requests
58749 */
58750 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
58751 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
58752
58753 void nlmclnt_next_cookie(struct nlm_cookie *c)
58754 {
58755 - u32 cookie = atomic_inc_return(&nlm_cookie);
58756 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
58757
58758 memcpy(c->data, &cookie, 4);
58759 c->len=4;
58760 diff --git a/fs/locks.c b/fs/locks.c
58761 index b27a300..4156d0b 100644
58762 --- a/fs/locks.c
58763 +++ b/fs/locks.c
58764 @@ -2183,16 +2183,16 @@ void locks_remove_flock(struct file *filp)
58765 return;
58766
58767 if (filp->f_op && filp->f_op->flock) {
58768 - struct file_lock fl = {
58769 + struct file_lock flock = {
58770 .fl_pid = current->tgid,
58771 .fl_file = filp,
58772 .fl_flags = FL_FLOCK,
58773 .fl_type = F_UNLCK,
58774 .fl_end = OFFSET_MAX,
58775 };
58776 - filp->f_op->flock(filp, F_SETLKW, &fl);
58777 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
58778 - fl.fl_ops->fl_release_private(&fl);
58779 + filp->f_op->flock(filp, F_SETLKW, &flock);
58780 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
58781 + flock.fl_ops->fl_release_private(&flock);
58782 }
58783
58784 spin_lock(&inode->i_lock);
58785 diff --git a/fs/namei.c b/fs/namei.c
58786 index 23ac50f..c6757a5 100644
58787 --- a/fs/namei.c
58788 +++ b/fs/namei.c
58789 @@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
58790 if (ret != -EACCES)
58791 return ret;
58792
58793 +#ifdef CONFIG_GRKERNSEC
58794 + /* we'll block if we have to log due to a denied capability use */
58795 + if (mask & MAY_NOT_BLOCK)
58796 + return -ECHILD;
58797 +#endif
58798 +
58799 if (S_ISDIR(inode->i_mode)) {
58800 /* DACs are overridable for directories */
58801 - if (inode_capable(inode, CAP_DAC_OVERRIDE))
58802 - return 0;
58803 if (!(mask & MAY_WRITE))
58804 - if (inode_capable(inode, CAP_DAC_READ_SEARCH))
58805 + if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
58806 + inode_capable(inode, CAP_DAC_READ_SEARCH))
58807 return 0;
58808 + if (inode_capable(inode, CAP_DAC_OVERRIDE))
58809 + return 0;
58810 return -EACCES;
58811 }
58812 /*
58813 + * Searching includes executable on directories, else just read.
58814 + */
58815 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
58816 + if (mask == MAY_READ)
58817 + if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
58818 + inode_capable(inode, CAP_DAC_READ_SEARCH))
58819 + return 0;
58820 +
58821 + /*
58822 * Read/write DACs are always overridable.
58823 * Executable DACs are overridable when there is
58824 * at least one exec bit set.
58825 @@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
58826 if (inode_capable(inode, CAP_DAC_OVERRIDE))
58827 return 0;
58828
58829 - /*
58830 - * Searching includes executable on directories, else just read.
58831 - */
58832 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
58833 - if (mask == MAY_READ)
58834 - if (inode_capable(inode, CAP_DAC_READ_SEARCH))
58835 - return 0;
58836 -
58837 return -EACCES;
58838 }
58839
58840 @@ -821,7 +829,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
58841 {
58842 struct dentry *dentry = link->dentry;
58843 int error;
58844 - char *s;
58845 + const char *s;
58846
58847 BUG_ON(nd->flags & LOOKUP_RCU);
58848
58849 @@ -842,6 +850,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
58850 if (error)
58851 goto out_put_nd_path;
58852
58853 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
58854 + dentry->d_inode, dentry, nd->path.mnt)) {
58855 + error = -EACCES;
58856 + goto out_put_nd_path;
58857 + }
58858 +
58859 nd->last_type = LAST_BIND;
58860 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
58861 error = PTR_ERR(*p);
58862 @@ -1602,6 +1616,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
58863 if (res)
58864 break;
58865 res = walk_component(nd, path, LOOKUP_FOLLOW);
58866 + if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
58867 + res = -EACCES;
58868 put_link(nd, &link, cookie);
58869 } while (res > 0);
58870
58871 @@ -1700,7 +1716,7 @@ EXPORT_SYMBOL(full_name_hash);
58872 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
58873 {
58874 unsigned long a, b, adata, bdata, mask, hash, len;
58875 - const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
58876 + static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
58877
58878 hash = a = 0;
58879 len = -sizeof(unsigned long);
58880 @@ -1981,6 +1997,8 @@ static int path_lookupat(int dfd, const char *name,
58881 if (err)
58882 break;
58883 err = lookup_last(nd, &path);
58884 + if (!err && gr_handle_symlink_owner(&link, nd->inode))
58885 + err = -EACCES;
58886 put_link(nd, &link, cookie);
58887 }
58888 }
58889 @@ -1988,6 +2006,13 @@ static int path_lookupat(int dfd, const char *name,
58890 if (!err)
58891 err = complete_walk(nd);
58892
58893 + if (!err && !(nd->flags & LOOKUP_PARENT)) {
58894 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
58895 + path_put(&nd->path);
58896 + err = -ENOENT;
58897 + }
58898 + }
58899 +
58900 if (!err && nd->flags & LOOKUP_DIRECTORY) {
58901 if (!can_lookup(nd->inode)) {
58902 path_put(&nd->path);
58903 @@ -2015,8 +2040,15 @@ static int filename_lookup(int dfd, struct filename *name,
58904 retval = path_lookupat(dfd, name->name,
58905 flags | LOOKUP_REVAL, nd);
58906
58907 - if (likely(!retval))
58908 + if (likely(!retval)) {
58909 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
58910 + if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
58911 + if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
58912 + path_put(&nd->path);
58913 + return -ENOENT;
58914 + }
58915 + }
58916 + }
58917 return retval;
58918 }
58919
58920 @@ -2587,6 +2619,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
58921 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
58922 return -EPERM;
58923
58924 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
58925 + return -EPERM;
58926 + if (gr_handle_rawio(inode))
58927 + return -EPERM;
58928 + if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
58929 + return -EACCES;
58930 +
58931 return 0;
58932 }
58933
58934 @@ -2818,7 +2857,7 @@ looked_up:
58935 * cleared otherwise prior to returning.
58936 */
58937 static int lookup_open(struct nameidata *nd, struct path *path,
58938 - struct file *file,
58939 + struct path *link, struct file *file,
58940 const struct open_flags *op,
58941 bool got_write, int *opened)
58942 {
58943 @@ -2853,6 +2892,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
58944 /* Negative dentry, just create the file */
58945 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
58946 umode_t mode = op->mode;
58947 +
58948 + if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
58949 + error = -EACCES;
58950 + goto out_dput;
58951 + }
58952 +
58953 + if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
58954 + error = -EACCES;
58955 + goto out_dput;
58956 + }
58957 +
58958 if (!IS_POSIXACL(dir->d_inode))
58959 mode &= ~current_umask();
58960 /*
58961 @@ -2874,6 +2924,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
58962 nd->flags & LOOKUP_EXCL);
58963 if (error)
58964 goto out_dput;
58965 + else
58966 + gr_handle_create(dentry, nd->path.mnt);
58967 }
58968 out_no_open:
58969 path->dentry = dentry;
58970 @@ -2888,7 +2940,7 @@ out_dput:
58971 /*
58972 * Handle the last step of open()
58973 */
58974 -static int do_last(struct nameidata *nd, struct path *path,
58975 +static int do_last(struct nameidata *nd, struct path *path, struct path *link,
58976 struct file *file, const struct open_flags *op,
58977 int *opened, struct filename *name)
58978 {
58979 @@ -2938,6 +2990,15 @@ static int do_last(struct nameidata *nd, struct path *path,
58980 if (error)
58981 return error;
58982
58983 + if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
58984 + error = -ENOENT;
58985 + goto out;
58986 + }
58987 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
58988 + error = -EACCES;
58989 + goto out;
58990 + }
58991 +
58992 audit_inode(name, dir, LOOKUP_PARENT);
58993 error = -EISDIR;
58994 /* trailing slashes? */
58995 @@ -2957,7 +3018,7 @@ retry_lookup:
58996 */
58997 }
58998 mutex_lock(&dir->d_inode->i_mutex);
58999 - error = lookup_open(nd, path, file, op, got_write, opened);
59000 + error = lookup_open(nd, path, link, file, op, got_write, opened);
59001 mutex_unlock(&dir->d_inode->i_mutex);
59002
59003 if (error <= 0) {
59004 @@ -2981,11 +3042,28 @@ retry_lookup:
59005 goto finish_open_created;
59006 }
59007
59008 + if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
59009 + error = -ENOENT;
59010 + goto exit_dput;
59011 + }
59012 + if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
59013 + error = -EACCES;
59014 + goto exit_dput;
59015 + }
59016 +
59017 /*
59018 * create/update audit record if it already exists.
59019 */
59020 - if (path->dentry->d_inode)
59021 + if (path->dentry->d_inode) {
59022 + /* only check if O_CREAT is specified, all other checks need to go
59023 + into may_open */
59024 + if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
59025 + error = -EACCES;
59026 + goto exit_dput;
59027 + }
59028 +
59029 audit_inode(name, path->dentry, 0);
59030 + }
59031
59032 /*
59033 * If atomic_open() acquired write access it is dropped now due to
59034 @@ -3026,6 +3104,11 @@ finish_lookup:
59035 }
59036 }
59037 BUG_ON(inode != path->dentry->d_inode);
59038 + /* if we're resolving a symlink to another symlink */
59039 + if (link && gr_handle_symlink_owner(link, inode)) {
59040 + error = -EACCES;
59041 + goto out;
59042 + }
59043 return 1;
59044 }
59045
59046 @@ -3035,7 +3118,6 @@ finish_lookup:
59047 save_parent.dentry = nd->path.dentry;
59048 save_parent.mnt = mntget(path->mnt);
59049 nd->path.dentry = path->dentry;
59050 -
59051 }
59052 nd->inode = inode;
59053 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
59054 @@ -3045,7 +3127,18 @@ finish_open:
59055 path_put(&save_parent);
59056 return error;
59057 }
59058 +
59059 + if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
59060 + error = -ENOENT;
59061 + goto out;
59062 + }
59063 + if (link && gr_handle_symlink_owner(link, nd->inode)) {
59064 + error = -EACCES;
59065 + goto out;
59066 + }
59067 +
59068 audit_inode(name, nd->path.dentry, 0);
59069 +
59070 error = -EISDIR;
59071 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
59072 goto out;
59073 @@ -3208,7 +3301,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
59074 if (unlikely(error))
59075 goto out;
59076
59077 - error = do_last(nd, &path, file, op, &opened, pathname);
59078 + error = do_last(nd, &path, NULL, file, op, &opened, pathname);
59079 while (unlikely(error > 0)) { /* trailing symlink */
59080 struct path link = path;
59081 void *cookie;
59082 @@ -3226,7 +3319,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
59083 error = follow_link(&link, nd, &cookie);
59084 if (unlikely(error))
59085 break;
59086 - error = do_last(nd, &path, file, op, &opened, pathname);
59087 + error = do_last(nd, &path, &link, file, op, &opened, pathname);
59088 put_link(nd, &link, cookie);
59089 }
59090 out:
59091 @@ -3326,8 +3419,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
59092 goto unlock;
59093
59094 error = -EEXIST;
59095 - if (dentry->d_inode)
59096 + if (dentry->d_inode) {
59097 + if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
59098 + error = -ENOENT;
59099 + }
59100 goto fail;
59101 + }
59102 /*
59103 * Special case - lookup gave negative, but... we had foo/bar/
59104 * From the vfs_mknod() POV we just have a negative dentry -
59105 @@ -3379,6 +3476,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
59106 }
59107 EXPORT_SYMBOL(user_path_create);
59108
59109 +static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
59110 +{
59111 + struct filename *tmp = getname(pathname);
59112 + struct dentry *res;
59113 + if (IS_ERR(tmp))
59114 + return ERR_CAST(tmp);
59115 + res = kern_path_create(dfd, tmp->name, path, lookup_flags);
59116 + if (IS_ERR(res))
59117 + putname(tmp);
59118 + else
59119 + *to = tmp;
59120 + return res;
59121 +}
59122 +
59123 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
59124 {
59125 int error = may_create(dir, dentry);
59126 @@ -3441,6 +3552,17 @@ retry:
59127
59128 if (!IS_POSIXACL(path.dentry->d_inode))
59129 mode &= ~current_umask();
59130 +
59131 + if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
59132 + error = -EPERM;
59133 + goto out;
59134 + }
59135 +
59136 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
59137 + error = -EACCES;
59138 + goto out;
59139 + }
59140 +
59141 error = security_path_mknod(&path, dentry, mode, dev);
59142 if (error)
59143 goto out;
59144 @@ -3457,6 +3579,8 @@ retry:
59145 break;
59146 }
59147 out:
59148 + if (!error)
59149 + gr_handle_create(dentry, path.mnt);
59150 done_path_create(&path, dentry);
59151 if (retry_estale(error, lookup_flags)) {
59152 lookup_flags |= LOOKUP_REVAL;
59153 @@ -3509,9 +3633,16 @@ retry:
59154
59155 if (!IS_POSIXACL(path.dentry->d_inode))
59156 mode &= ~current_umask();
59157 + if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
59158 + error = -EACCES;
59159 + goto out;
59160 + }
59161 error = security_path_mkdir(&path, dentry, mode);
59162 if (!error)
59163 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
59164 + if (!error)
59165 + gr_handle_create(dentry, path.mnt);
59166 +out:
59167 done_path_create(&path, dentry);
59168 if (retry_estale(error, lookup_flags)) {
59169 lookup_flags |= LOOKUP_REVAL;
59170 @@ -3592,6 +3723,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
59171 struct filename *name;
59172 struct dentry *dentry;
59173 struct nameidata nd;
59174 + ino_t saved_ino = 0;
59175 + dev_t saved_dev = 0;
59176 unsigned int lookup_flags = 0;
59177 retry:
59178 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
59179 @@ -3624,10 +3757,21 @@ retry:
59180 error = -ENOENT;
59181 goto exit3;
59182 }
59183 +
59184 + saved_ino = dentry->d_inode->i_ino;
59185 + saved_dev = gr_get_dev_from_dentry(dentry);
59186 +
59187 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
59188 + error = -EACCES;
59189 + goto exit3;
59190 + }
59191 +
59192 error = security_path_rmdir(&nd.path, dentry);
59193 if (error)
59194 goto exit3;
59195 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
59196 + if (!error && (saved_dev || saved_ino))
59197 + gr_handle_delete(saved_ino, saved_dev);
59198 exit3:
59199 dput(dentry);
59200 exit2:
59201 @@ -3693,6 +3837,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
59202 struct dentry *dentry;
59203 struct nameidata nd;
59204 struct inode *inode = NULL;
59205 + ino_t saved_ino = 0;
59206 + dev_t saved_dev = 0;
59207 unsigned int lookup_flags = 0;
59208 retry:
59209 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
59210 @@ -3719,10 +3865,22 @@ retry:
59211 if (!inode)
59212 goto slashes;
59213 ihold(inode);
59214 +
59215 + if (inode->i_nlink <= 1) {
59216 + saved_ino = inode->i_ino;
59217 + saved_dev = gr_get_dev_from_dentry(dentry);
59218 + }
59219 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
59220 + error = -EACCES;
59221 + goto exit2;
59222 + }
59223 +
59224 error = security_path_unlink(&nd.path, dentry);
59225 if (error)
59226 goto exit2;
59227 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
59228 + if (!error && (saved_ino || saved_dev))
59229 + gr_handle_delete(saved_ino, saved_dev);
59230 exit2:
59231 dput(dentry);
59232 }
59233 @@ -3800,9 +3958,17 @@ retry:
59234 if (IS_ERR(dentry))
59235 goto out_putname;
59236
59237 + if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
59238 + error = -EACCES;
59239 + goto out;
59240 + }
59241 +
59242 error = security_path_symlink(&path, dentry, from->name);
59243 if (!error)
59244 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
59245 + if (!error)
59246 + gr_handle_create(dentry, path.mnt);
59247 +out:
59248 done_path_create(&path, dentry);
59249 if (retry_estale(error, lookup_flags)) {
59250 lookup_flags |= LOOKUP_REVAL;
59251 @@ -3882,6 +4048,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
59252 {
59253 struct dentry *new_dentry;
59254 struct path old_path, new_path;
59255 + struct filename *to = NULL;
59256 int how = 0;
59257 int error;
59258
59259 @@ -3905,7 +4072,7 @@ retry:
59260 if (error)
59261 return error;
59262
59263 - new_dentry = user_path_create(newdfd, newname, &new_path,
59264 + new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
59265 (how & LOOKUP_REVAL));
59266 error = PTR_ERR(new_dentry);
59267 if (IS_ERR(new_dentry))
59268 @@ -3917,11 +4084,28 @@ retry:
59269 error = may_linkat(&old_path);
59270 if (unlikely(error))
59271 goto out_dput;
59272 +
59273 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
59274 + old_path.dentry->d_inode,
59275 + old_path.dentry->d_inode->i_mode, to)) {
59276 + error = -EACCES;
59277 + goto out_dput;
59278 + }
59279 +
59280 + if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
59281 + old_path.dentry, old_path.mnt, to)) {
59282 + error = -EACCES;
59283 + goto out_dput;
59284 + }
59285 +
59286 error = security_path_link(old_path.dentry, &new_path, new_dentry);
59287 if (error)
59288 goto out_dput;
59289 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
59290 + if (!error)
59291 + gr_handle_create(new_dentry, new_path.mnt);
59292 out_dput:
59293 + putname(to);
59294 done_path_create(&new_path, new_dentry);
59295 if (retry_estale(error, how)) {
59296 how |= LOOKUP_REVAL;
59297 @@ -4167,12 +4351,21 @@ retry:
59298 if (new_dentry == trap)
59299 goto exit5;
59300
59301 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
59302 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
59303 + to);
59304 + if (error)
59305 + goto exit5;
59306 +
59307 error = security_path_rename(&oldnd.path, old_dentry,
59308 &newnd.path, new_dentry);
59309 if (error)
59310 goto exit5;
59311 error = vfs_rename(old_dir->d_inode, old_dentry,
59312 new_dir->d_inode, new_dentry);
59313 + if (!error)
59314 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
59315 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
59316 exit5:
59317 dput(new_dentry);
59318 exit4:
59319 @@ -4204,6 +4397,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
59320
59321 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
59322 {
59323 + char tmpbuf[64];
59324 + const char *newlink;
59325 int len;
59326
59327 len = PTR_ERR(link);
59328 @@ -4213,7 +4408,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
59329 len = strlen(link);
59330 if (len > (unsigned) buflen)
59331 len = buflen;
59332 - if (copy_to_user(buffer, link, len))
59333 +
59334 + if (len < sizeof(tmpbuf)) {
59335 + memcpy(tmpbuf, link, len);
59336 + newlink = tmpbuf;
59337 + } else
59338 + newlink = link;
59339 +
59340 + if (copy_to_user(buffer, newlink, len))
59341 len = -EFAULT;
59342 out:
59343 return len;
59344 diff --git a/fs/namespace.c b/fs/namespace.c
59345 index da5c494..a755a54 100644
59346 --- a/fs/namespace.c
59347 +++ b/fs/namespace.c
59348 @@ -1268,6 +1268,9 @@ static int do_umount(struct mount *mnt, int flags)
59349 if (!(sb->s_flags & MS_RDONLY))
59350 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
59351 up_write(&sb->s_umount);
59352 +
59353 + gr_log_remount(mnt->mnt_devname, retval);
59354 +
59355 return retval;
59356 }
59357
59358 @@ -1286,6 +1289,9 @@ static int do_umount(struct mount *mnt, int flags)
59359 }
59360 br_write_unlock(&vfsmount_lock);
59361 namespace_unlock();
59362 +
59363 + gr_log_unmount(mnt->mnt_devname, retval);
59364 +
59365 return retval;
59366 }
59367
59368 @@ -1305,7 +1311,7 @@ static inline bool may_mount(void)
59369 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
59370 */
59371
59372 -SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
59373 +SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
59374 {
59375 struct path path;
59376 struct mount *mnt;
59377 @@ -1347,7 +1353,7 @@ out:
59378 /*
59379 * The 2.0 compatible umount. No flags.
59380 */
59381 -SYSCALL_DEFINE1(oldumount, char __user *, name)
59382 +SYSCALL_DEFINE1(oldumount, const char __user *, name)
59383 {
59384 return sys_umount(name, 0);
59385 }
59386 @@ -2358,6 +2364,16 @@ long do_mount(const char *dev_name, const char *dir_name,
59387 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
59388 MS_STRICTATIME);
59389
59390 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
59391 + retval = -EPERM;
59392 + goto dput_out;
59393 + }
59394 +
59395 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
59396 + retval = -EPERM;
59397 + goto dput_out;
59398 + }
59399 +
59400 if (flags & MS_REMOUNT)
59401 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
59402 data_page);
59403 @@ -2372,6 +2388,9 @@ long do_mount(const char *dev_name, const char *dir_name,
59404 dev_name, data_page);
59405 dput_out:
59406 path_put(&path);
59407 +
59408 + gr_log_mount(dev_name, dir_name, retval);
59409 +
59410 return retval;
59411 }
59412
59413 @@ -2389,7 +2408,7 @@ static void free_mnt_ns(struct mnt_namespace *ns)
59414 * number incrementing at 10Ghz will take 12,427 years to wrap which
59415 * is effectively never, so we can ignore the possibility.
59416 */
59417 -static atomic64_t mnt_ns_seq = ATOMIC64_INIT(1);
59418 +static atomic64_unchecked_t mnt_ns_seq = ATOMIC64_INIT(1);
59419
59420 static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
59421 {
59422 @@ -2404,7 +2423,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
59423 kfree(new_ns);
59424 return ERR_PTR(ret);
59425 }
59426 - new_ns->seq = atomic64_add_return(1, &mnt_ns_seq);
59427 + new_ns->seq = atomic64_inc_return_unchecked(&mnt_ns_seq);
59428 atomic_set(&new_ns->count, 1);
59429 new_ns->root = NULL;
59430 INIT_LIST_HEAD(&new_ns->list);
59431 @@ -2418,7 +2437,7 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)
59432 * Allocate a new namespace structure and populate it with contents
59433 * copied from the namespace of the passed in task structure.
59434 */
59435 -static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
59436 +static __latent_entropy struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
59437 struct user_namespace *user_ns, struct fs_struct *fs)
59438 {
59439 struct mnt_namespace *new_ns;
59440 @@ -2549,8 +2568,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
59441 }
59442 EXPORT_SYMBOL(mount_subtree);
59443
59444 -SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
59445 - char __user *, type, unsigned long, flags, void __user *, data)
59446 +SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
59447 + const char __user *, type, unsigned long, flags, void __user *, data)
59448 {
59449 int ret;
59450 char *kernel_type;
59451 @@ -2663,6 +2682,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
59452 if (error)
59453 goto out2;
59454
59455 + if (gr_handle_chroot_pivot()) {
59456 + error = -EPERM;
59457 + goto out2;
59458 + }
59459 +
59460 get_fs_root(current->fs, &root);
59461 old_mp = lock_mount(&old);
59462 error = PTR_ERR(old_mp);
59463 @@ -2932,7 +2956,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
59464 !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
59465 return -EPERM;
59466
59467 - if (fs->users != 1)
59468 + if (atomic_read(&fs->users) != 1)
59469 return -EINVAL;
59470
59471 get_mnt_ns(mnt_ns);
59472 diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
59473 index f4ccfe6..a5cf064 100644
59474 --- a/fs/nfs/callback_xdr.c
59475 +++ b/fs/nfs/callback_xdr.c
59476 @@ -51,7 +51,7 @@ struct callback_op {
59477 callback_decode_arg_t decode_args;
59478 callback_encode_res_t encode_res;
59479 long res_maxsize;
59480 -};
59481 +} __do_const;
59482
59483 static struct callback_op callback_ops[];
59484
59485 diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
59486 index eda8879..bfc6837 100644
59487 --- a/fs/nfs/inode.c
59488 +++ b/fs/nfs/inode.c
59489 @@ -1150,16 +1150,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
59490 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
59491 }
59492
59493 -static atomic_long_t nfs_attr_generation_counter;
59494 +static atomic_long_unchecked_t nfs_attr_generation_counter;
59495
59496 static unsigned long nfs_read_attr_generation_counter(void)
59497 {
59498 - return atomic_long_read(&nfs_attr_generation_counter);
59499 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
59500 }
59501
59502 unsigned long nfs_inc_attr_generation_counter(void)
59503 {
59504 - return atomic_long_inc_return(&nfs_attr_generation_counter);
59505 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
59506 }
59507
59508 void nfs_fattr_init(struct nfs_fattr *fattr)
59509 diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
59510 index 419572f..5414a23 100644
59511 --- a/fs/nfsd/nfs4proc.c
59512 +++ b/fs/nfsd/nfs4proc.c
59513 @@ -1168,7 +1168,7 @@ struct nfsd4_operation {
59514 nfsd4op_rsize op_rsize_bop;
59515 stateid_getter op_get_currentstateid;
59516 stateid_setter op_set_currentstateid;
59517 -};
59518 +} __do_const;
59519
59520 static struct nfsd4_operation nfsd4_ops[];
59521
59522 diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
59523 index ecc735e..79b2d31 100644
59524 --- a/fs/nfsd/nfs4xdr.c
59525 +++ b/fs/nfsd/nfs4xdr.c
59526 @@ -1500,7 +1500,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
59527
59528 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
59529
59530 -static nfsd4_dec nfsd4_dec_ops[] = {
59531 +static const nfsd4_dec nfsd4_dec_ops[] = {
59532 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
59533 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
59534 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
59535 @@ -1540,7 +1540,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
59536 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
59537 };
59538
59539 -static nfsd4_dec nfsd41_dec_ops[] = {
59540 +static const nfsd4_dec nfsd41_dec_ops[] = {
59541 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
59542 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
59543 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
59544 @@ -1602,7 +1602,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
59545 };
59546
59547 struct nfsd4_minorversion_ops {
59548 - nfsd4_dec *decoders;
59549 + const nfsd4_dec *decoders;
59550 int nops;
59551 };
59552
59553 diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
59554 index b6af150..f6ec5e3 100644
59555 --- a/fs/nfsd/nfscache.c
59556 +++ b/fs/nfsd/nfscache.c
59557 @@ -547,14 +547,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
59558 {
59559 struct svc_cacherep *rp = rqstp->rq_cacherep;
59560 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
59561 - int len;
59562 + long len;
59563 size_t bufsize = 0;
59564
59565 if (!rp)
59566 return;
59567
59568 - len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
59569 - len >>= 2;
59570 + if (statp) {
59571 + len = (char*)statp - (char*)resv->iov_base;
59572 + len = resv->iov_len - len;
59573 + len >>= 2;
59574 + }
59575
59576 /* Don't cache excessive amounts of data and XDR failures */
59577 if (!statp || len > (256 >> 2)) {
59578 diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
59579 index 72cb28e..5b5f87d 100644
59580 --- a/fs/nfsd/vfs.c
59581 +++ b/fs/nfsd/vfs.c
59582 @@ -993,7 +993,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
59583 } else {
59584 oldfs = get_fs();
59585 set_fs(KERNEL_DS);
59586 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
59587 + host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
59588 set_fs(oldfs);
59589 }
59590
59591 @@ -1080,7 +1080,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
59592
59593 /* Write the data. */
59594 oldfs = get_fs(); set_fs(KERNEL_DS);
59595 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
59596 + host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
59597 set_fs(oldfs);
59598 if (host_err < 0)
59599 goto out_nfserr;
59600 @@ -1626,7 +1626,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
59601 */
59602
59603 oldfs = get_fs(); set_fs(KERNEL_DS);
59604 - host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
59605 + host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
59606 set_fs(oldfs);
59607
59608 if (host_err < 0)
59609 diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
59610 index fea6bd5..8ee9d81 100644
59611 --- a/fs/nls/nls_base.c
59612 +++ b/fs/nls/nls_base.c
59613 @@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
59614
59615 int register_nls(struct nls_table * nls)
59616 {
59617 - struct nls_table ** tmp = &tables;
59618 + struct nls_table *tmp = tables;
59619
59620 if (nls->next)
59621 return -EBUSY;
59622
59623 spin_lock(&nls_lock);
59624 - while (*tmp) {
59625 - if (nls == *tmp) {
59626 + while (tmp) {
59627 + if (nls == tmp) {
59628 spin_unlock(&nls_lock);
59629 return -EBUSY;
59630 }
59631 - tmp = &(*tmp)->next;
59632 + tmp = tmp->next;
59633 }
59634 - nls->next = tables;
59635 + pax_open_kernel();
59636 + *(struct nls_table **)&nls->next = tables;
59637 + pax_close_kernel();
59638 tables = nls;
59639 spin_unlock(&nls_lock);
59640 return 0;
59641 @@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
59642
59643 int unregister_nls(struct nls_table * nls)
59644 {
59645 - struct nls_table ** tmp = &tables;
59646 + struct nls_table * const * tmp = &tables;
59647
59648 spin_lock(&nls_lock);
59649 while (*tmp) {
59650 if (nls == *tmp) {
59651 - *tmp = nls->next;
59652 + pax_open_kernel();
59653 + *(struct nls_table **)tmp = nls->next;
59654 + pax_close_kernel();
59655 spin_unlock(&nls_lock);
59656 return 0;
59657 }
59658 diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
59659 index 7424929..35f6be5 100644
59660 --- a/fs/nls/nls_euc-jp.c
59661 +++ b/fs/nls/nls_euc-jp.c
59662 @@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
59663 p_nls = load_nls("cp932");
59664
59665 if (p_nls) {
59666 - table.charset2upper = p_nls->charset2upper;
59667 - table.charset2lower = p_nls->charset2lower;
59668 + pax_open_kernel();
59669 + *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
59670 + *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
59671 + pax_close_kernel();
59672 return register_nls(&table);
59673 }
59674
59675 diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
59676 index e7bc1d7..06bd4bb 100644
59677 --- a/fs/nls/nls_koi8-ru.c
59678 +++ b/fs/nls/nls_koi8-ru.c
59679 @@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
59680 p_nls = load_nls("koi8-u");
59681
59682 if (p_nls) {
59683 - table.charset2upper = p_nls->charset2upper;
59684 - table.charset2lower = p_nls->charset2lower;
59685 + pax_open_kernel();
59686 + *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
59687 + *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
59688 + pax_close_kernel();
59689 return register_nls(&table);
59690 }
59691
59692 diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
59693 index e44cb64..4807084 100644
59694 --- a/fs/notify/fanotify/fanotify_user.c
59695 +++ b/fs/notify/fanotify/fanotify_user.c
59696 @@ -253,8 +253,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
59697
59698 fd = fanotify_event_metadata.fd;
59699 ret = -EFAULT;
59700 - if (copy_to_user(buf, &fanotify_event_metadata,
59701 - fanotify_event_metadata.event_len))
59702 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
59703 + copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
59704 goto out_close_fd;
59705
59706 ret = prepare_for_access_response(group, event, fd);
59707 diff --git a/fs/notify/notification.c b/fs/notify/notification.c
59708 index 7b51b05..5ea5ef6 100644
59709 --- a/fs/notify/notification.c
59710 +++ b/fs/notify/notification.c
59711 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
59712 * get set to 0 so it will never get 'freed'
59713 */
59714 static struct fsnotify_event *q_overflow_event;
59715 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
59716 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
59717
59718 /**
59719 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
59720 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
59721 */
59722 u32 fsnotify_get_cookie(void)
59723 {
59724 - return atomic_inc_return(&fsnotify_sync_cookie);
59725 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
59726 }
59727 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
59728
59729 diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
59730 index 9e38daf..5727cae 100644
59731 --- a/fs/ntfs/dir.c
59732 +++ b/fs/ntfs/dir.c
59733 @@ -1310,7 +1310,7 @@ find_next_index_buffer:
59734 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
59735 ~(s64)(ndir->itype.index.block_size - 1)));
59736 /* Bounds checks. */
59737 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
59738 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
59739 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
59740 "inode 0x%lx or driver bug.", vdir->i_ino);
59741 goto err_out;
59742 diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
59743 index ea4ba9d..1e13d34 100644
59744 --- a/fs/ntfs/file.c
59745 +++ b/fs/ntfs/file.c
59746 @@ -1282,7 +1282,7 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
59747 char *addr;
59748 size_t total = 0;
59749 unsigned len;
59750 - int left;
59751 + unsigned left;
59752
59753 do {
59754 len = PAGE_CACHE_SIZE - ofs;
59755 diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
59756 index 82650d5..db37dcf 100644
59757 --- a/fs/ntfs/super.c
59758 +++ b/fs/ntfs/super.c
59759 @@ -685,7 +685,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
59760 if (!silent)
59761 ntfs_error(sb, "Primary boot sector is invalid.");
59762 } else if (!silent)
59763 - ntfs_error(sb, read_err_str, "primary");
59764 + ntfs_error(sb, read_err_str, "%s", "primary");
59765 if (!(NTFS_SB(sb)->on_errors & ON_ERRORS_RECOVER)) {
59766 if (bh_primary)
59767 brelse(bh_primary);
59768 @@ -701,7 +701,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
59769 goto hotfix_primary_boot_sector;
59770 brelse(bh_backup);
59771 } else if (!silent)
59772 - ntfs_error(sb, read_err_str, "backup");
59773 + ntfs_error(sb, read_err_str, "%s", "backup");
59774 /* Try to read NT3.51- backup boot sector. */
59775 if ((bh_backup = sb_bread(sb, nr_blocks >> 1))) {
59776 if (is_boot_sector_ntfs(sb, (NTFS_BOOT_SECTOR*)
59777 @@ -712,7 +712,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
59778 "sector.");
59779 brelse(bh_backup);
59780 } else if (!silent)
59781 - ntfs_error(sb, read_err_str, "backup");
59782 + ntfs_error(sb, read_err_str, "%s", "backup");
59783 /* We failed. Cleanup and return. */
59784 if (bh_primary)
59785 brelse(bh_primary);
59786 diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
59787 index cd5496b..26a1055 100644
59788 --- a/fs/ocfs2/localalloc.c
59789 +++ b/fs/ocfs2/localalloc.c
59790 @@ -1278,7 +1278,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
59791 goto bail;
59792 }
59793
59794 - atomic_inc(&osb->alloc_stats.moves);
59795 + atomic_inc_unchecked(&osb->alloc_stats.moves);
59796
59797 bail:
59798 if (handle)
59799 diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
59800 index 3a90347..c40bef8 100644
59801 --- a/fs/ocfs2/ocfs2.h
59802 +++ b/fs/ocfs2/ocfs2.h
59803 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
59804
59805 struct ocfs2_alloc_stats
59806 {
59807 - atomic_t moves;
59808 - atomic_t local_data;
59809 - atomic_t bitmap_data;
59810 - atomic_t bg_allocs;
59811 - atomic_t bg_extends;
59812 + atomic_unchecked_t moves;
59813 + atomic_unchecked_t local_data;
59814 + atomic_unchecked_t bitmap_data;
59815 + atomic_unchecked_t bg_allocs;
59816 + atomic_unchecked_t bg_extends;
59817 };
59818
59819 enum ocfs2_local_alloc_state
59820 diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
59821 index 5397c07..54afc55 100644
59822 --- a/fs/ocfs2/suballoc.c
59823 +++ b/fs/ocfs2/suballoc.c
59824 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
59825 mlog_errno(status);
59826 goto bail;
59827 }
59828 - atomic_inc(&osb->alloc_stats.bg_extends);
59829 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
59830
59831 /* You should never ask for this much metadata */
59832 BUG_ON(bits_wanted >
59833 @@ -2000,7 +2000,7 @@ int ocfs2_claim_metadata(handle_t *handle,
59834 mlog_errno(status);
59835 goto bail;
59836 }
59837 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
59838 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
59839
59840 *suballoc_loc = res.sr_bg_blkno;
59841 *suballoc_bit_start = res.sr_bit_offset;
59842 @@ -2164,7 +2164,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
59843 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
59844 res->sr_bits);
59845
59846 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
59847 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
59848
59849 BUG_ON(res->sr_bits != 1);
59850
59851 @@ -2206,7 +2206,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
59852 mlog_errno(status);
59853 goto bail;
59854 }
59855 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
59856 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
59857
59858 BUG_ON(res.sr_bits != 1);
59859
59860 @@ -2310,7 +2310,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
59861 cluster_start,
59862 num_clusters);
59863 if (!status)
59864 - atomic_inc(&osb->alloc_stats.local_data);
59865 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
59866 } else {
59867 if (min_clusters > (osb->bitmap_cpg - 1)) {
59868 /* The only paths asking for contiguousness
59869 @@ -2336,7 +2336,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
59870 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
59871 res.sr_bg_blkno,
59872 res.sr_bit_offset);
59873 - atomic_inc(&osb->alloc_stats.bitmap_data);
59874 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
59875 *num_clusters = res.sr_bits;
59876 }
59877 }
59878 diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
59879 index d4e81e4..ad89f5f 100644
59880 --- a/fs/ocfs2/super.c
59881 +++ b/fs/ocfs2/super.c
59882 @@ -300,11 +300,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
59883 "%10s => GlobalAllocs: %d LocalAllocs: %d "
59884 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
59885 "Stats",
59886 - atomic_read(&osb->alloc_stats.bitmap_data),
59887 - atomic_read(&osb->alloc_stats.local_data),
59888 - atomic_read(&osb->alloc_stats.bg_allocs),
59889 - atomic_read(&osb->alloc_stats.moves),
59890 - atomic_read(&osb->alloc_stats.bg_extends));
59891 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
59892 + atomic_read_unchecked(&osb->alloc_stats.local_data),
59893 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
59894 + atomic_read_unchecked(&osb->alloc_stats.moves),
59895 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
59896
59897 out += snprintf(buf + out, len - out,
59898 "%10s => State: %u Descriptor: %llu Size: %u bits "
59899 @@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
59900 spin_lock_init(&osb->osb_xattr_lock);
59901 ocfs2_init_steal_slots(osb);
59902
59903 - atomic_set(&osb->alloc_stats.moves, 0);
59904 - atomic_set(&osb->alloc_stats.local_data, 0);
59905 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
59906 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
59907 - atomic_set(&osb->alloc_stats.bg_extends, 0);
59908 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
59909 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
59910 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
59911 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
59912 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
59913
59914 /* Copy the blockcheck stats from the superblock probe */
59915 osb->osb_ecc_stats = *stats;
59916 diff --git a/fs/open.c b/fs/open.c
59917 index d420331..2dbb3fd 100644
59918 --- a/fs/open.c
59919 +++ b/fs/open.c
59920 @@ -32,6 +32,8 @@
59921 #include <linux/dnotify.h>
59922 #include <linux/compat.h>
59923
59924 +#define CREATE_TRACE_POINTS
59925 +#include <trace/events/fs.h>
59926 #include "internal.h"
59927
59928 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
59929 @@ -102,6 +104,8 @@ long vfs_truncate(struct path *path, loff_t length)
59930 error = locks_verify_truncate(inode, NULL, length);
59931 if (!error)
59932 error = security_path_truncate(path);
59933 + if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
59934 + error = -EACCES;
59935 if (!error)
59936 error = do_truncate(path->dentry, length, 0, NULL);
59937
59938 @@ -186,6 +190,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
59939 error = locks_verify_truncate(inode, f.file, length);
59940 if (!error)
59941 error = security_path_truncate(&f.file->f_path);
59942 + if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
59943 + error = -EACCES;
59944 if (!error)
59945 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
59946 sb_end_write(inode->i_sb);
59947 @@ -360,6 +366,9 @@ retry:
59948 if (__mnt_is_readonly(path.mnt))
59949 res = -EROFS;
59950
59951 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
59952 + res = -EACCES;
59953 +
59954 out_path_release:
59955 path_put(&path);
59956 if (retry_estale(res, lookup_flags)) {
59957 @@ -391,6 +400,8 @@ retry:
59958 if (error)
59959 goto dput_and_out;
59960
59961 + gr_log_chdir(path.dentry, path.mnt);
59962 +
59963 set_fs_pwd(current->fs, &path);
59964
59965 dput_and_out:
59966 @@ -420,6 +431,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
59967 goto out_putf;
59968
59969 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
59970 +
59971 + if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
59972 + error = -EPERM;
59973 +
59974 + if (!error)
59975 + gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
59976 +
59977 if (!error)
59978 set_fs_pwd(current->fs, &f.file->f_path);
59979 out_putf:
59980 @@ -449,7 +467,13 @@ retry:
59981 if (error)
59982 goto dput_and_out;
59983
59984 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
59985 + goto dput_and_out;
59986 +
59987 set_fs_root(current->fs, &path);
59988 +
59989 + gr_handle_chroot_chdir(&path);
59990 +
59991 error = 0;
59992 dput_and_out:
59993 path_put(&path);
59994 @@ -471,6 +495,16 @@ static int chmod_common(struct path *path, umode_t mode)
59995 if (error)
59996 return error;
59997 mutex_lock(&inode->i_mutex);
59998 +
59999 + if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
60000 + error = -EACCES;
60001 + goto out_unlock;
60002 + }
60003 + if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
60004 + error = -EACCES;
60005 + goto out_unlock;
60006 + }
60007 +
60008 error = security_path_chmod(path, mode);
60009 if (error)
60010 goto out_unlock;
60011 @@ -530,6 +564,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
60012 uid = make_kuid(current_user_ns(), user);
60013 gid = make_kgid(current_user_ns(), group);
60014
60015 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
60016 + return -EACCES;
60017 +
60018 newattrs.ia_valid = ATTR_CTIME;
60019 if (user != (uid_t) -1) {
60020 if (!uid_valid(uid))
60021 @@ -974,6 +1011,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
60022 } else {
60023 fsnotify_open(f);
60024 fd_install(fd, f);
60025 + trace_do_sys_open(tmp->name, flags, mode);
60026 }
60027 }
60028 putname(tmp);
60029 diff --git a/fs/pipe.c b/fs/pipe.c
60030 index 0e0752e..7cfdd50 100644
60031 --- a/fs/pipe.c
60032 +++ b/fs/pipe.c
60033 @@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
60034
60035 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
60036 {
60037 - if (pipe->files)
60038 + if (atomic_read(&pipe->files))
60039 mutex_lock_nested(&pipe->mutex, subclass);
60040 }
60041
60042 @@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
60043
60044 void pipe_unlock(struct pipe_inode_info *pipe)
60045 {
60046 - if (pipe->files)
60047 + if (atomic_read(&pipe->files))
60048 mutex_unlock(&pipe->mutex);
60049 }
60050 EXPORT_SYMBOL(pipe_unlock);
60051 @@ -449,9 +449,9 @@ redo:
60052 }
60053 if (bufs) /* More to do? */
60054 continue;
60055 - if (!pipe->writers)
60056 + if (!atomic_read(&pipe->writers))
60057 break;
60058 - if (!pipe->waiting_writers) {
60059 + if (!atomic_read(&pipe->waiting_writers)) {
60060 /* syscall merging: Usually we must not sleep
60061 * if O_NONBLOCK is set, or if we got some data.
60062 * But if a writer sleeps in kernel space, then
60063 @@ -513,7 +513,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
60064 ret = 0;
60065 __pipe_lock(pipe);
60066
60067 - if (!pipe->readers) {
60068 + if (!atomic_read(&pipe->readers)) {
60069 send_sig(SIGPIPE, current, 0);
60070 ret = -EPIPE;
60071 goto out;
60072 @@ -562,7 +562,7 @@ redo1:
60073 for (;;) {
60074 int bufs;
60075
60076 - if (!pipe->readers) {
60077 + if (!atomic_read(&pipe->readers)) {
60078 send_sig(SIGPIPE, current, 0);
60079 if (!ret)
60080 ret = -EPIPE;
60081 @@ -653,9 +653,9 @@ redo2:
60082 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
60083 do_wakeup = 0;
60084 }
60085 - pipe->waiting_writers++;
60086 + atomic_inc(&pipe->waiting_writers);
60087 pipe_wait(pipe);
60088 - pipe->waiting_writers--;
60089 + atomic_dec(&pipe->waiting_writers);
60090 }
60091 out:
60092 __pipe_unlock(pipe);
60093 @@ -709,7 +709,7 @@ pipe_poll(struct file *filp, poll_table *wait)
60094 mask = 0;
60095 if (filp->f_mode & FMODE_READ) {
60096 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
60097 - if (!pipe->writers && filp->f_version != pipe->w_counter)
60098 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
60099 mask |= POLLHUP;
60100 }
60101
60102 @@ -719,7 +719,7 @@ pipe_poll(struct file *filp, poll_table *wait)
60103 * Most Unices do not set POLLERR for FIFOs but on Linux they
60104 * behave exactly like pipes for poll().
60105 */
60106 - if (!pipe->readers)
60107 + if (!atomic_read(&pipe->readers))
60108 mask |= POLLERR;
60109 }
60110
60111 @@ -731,7 +731,7 @@ static void put_pipe_info(struct inode *inode, struct pipe_inode_info *pipe)
60112 int kill = 0;
60113
60114 spin_lock(&inode->i_lock);
60115 - if (!--pipe->files) {
60116 + if (atomic_dec_and_test(&pipe->files)) {
60117 inode->i_pipe = NULL;
60118 kill = 1;
60119 }
60120 @@ -748,11 +748,11 @@ pipe_release(struct inode *inode, struct file *file)
60121
60122 __pipe_lock(pipe);
60123 if (file->f_mode & FMODE_READ)
60124 - pipe->readers--;
60125 + atomic_dec(&pipe->readers);
60126 if (file->f_mode & FMODE_WRITE)
60127 - pipe->writers--;
60128 + atomic_dec(&pipe->writers);
60129
60130 - if (pipe->readers || pipe->writers) {
60131 + if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
60132 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
60133 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
60134 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
60135 @@ -817,7 +817,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
60136 kfree(pipe);
60137 }
60138
60139 -static struct vfsmount *pipe_mnt __read_mostly;
60140 +struct vfsmount *pipe_mnt __read_mostly;
60141
60142 /*
60143 * pipefs_dname() is called from d_path().
60144 @@ -847,8 +847,9 @@ static struct inode * get_pipe_inode(void)
60145 goto fail_iput;
60146
60147 inode->i_pipe = pipe;
60148 - pipe->files = 2;
60149 - pipe->readers = pipe->writers = 1;
60150 + atomic_set(&pipe->files, 2);
60151 + atomic_set(&pipe->readers, 1);
60152 + atomic_set(&pipe->writers, 1);
60153 inode->i_fop = &pipefifo_fops;
60154
60155 /*
60156 @@ -1027,17 +1028,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
60157 spin_lock(&inode->i_lock);
60158 if (inode->i_pipe) {
60159 pipe = inode->i_pipe;
60160 - pipe->files++;
60161 + atomic_inc(&pipe->files);
60162 spin_unlock(&inode->i_lock);
60163 } else {
60164 spin_unlock(&inode->i_lock);
60165 pipe = alloc_pipe_info();
60166 if (!pipe)
60167 return -ENOMEM;
60168 - pipe->files = 1;
60169 + atomic_set(&pipe->files, 1);
60170 spin_lock(&inode->i_lock);
60171 if (unlikely(inode->i_pipe)) {
60172 - inode->i_pipe->files++;
60173 + atomic_inc(&inode->i_pipe->files);
60174 spin_unlock(&inode->i_lock);
60175 free_pipe_info(pipe);
60176 pipe = inode->i_pipe;
60177 @@ -1062,10 +1063,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
60178 * opened, even when there is no process writing the FIFO.
60179 */
60180 pipe->r_counter++;
60181 - if (pipe->readers++ == 0)
60182 + if (atomic_inc_return(&pipe->readers) == 1)
60183 wake_up_partner(pipe);
60184
60185 - if (!is_pipe && !pipe->writers) {
60186 + if (!is_pipe && !atomic_read(&pipe->writers)) {
60187 if ((filp->f_flags & O_NONBLOCK)) {
60188 /* suppress POLLHUP until we have
60189 * seen a writer */
60190 @@ -1084,14 +1085,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
60191 * errno=ENXIO when there is no process reading the FIFO.
60192 */
60193 ret = -ENXIO;
60194 - if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
60195 + if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
60196 goto err;
60197
60198 pipe->w_counter++;
60199 - if (!pipe->writers++)
60200 + if (atomic_inc_return(&pipe->writers) == 1)
60201 wake_up_partner(pipe);
60202
60203 - if (!is_pipe && !pipe->readers) {
60204 + if (!is_pipe && !atomic_read(&pipe->readers)) {
60205 if (wait_for_partner(pipe, &pipe->r_counter))
60206 goto err_wr;
60207 }
60208 @@ -1105,11 +1106,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
60209 * the process can at least talk to itself.
60210 */
60211
60212 - pipe->readers++;
60213 - pipe->writers++;
60214 + atomic_inc(&pipe->readers);
60215 + atomic_inc(&pipe->writers);
60216 pipe->r_counter++;
60217 pipe->w_counter++;
60218 - if (pipe->readers == 1 || pipe->writers == 1)
60219 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
60220 wake_up_partner(pipe);
60221 break;
60222
60223 @@ -1123,13 +1124,13 @@ static int fifo_open(struct inode *inode, struct file *filp)
60224 return 0;
60225
60226 err_rd:
60227 - if (!--pipe->readers)
60228 + if (atomic_dec_and_test(&pipe->readers))
60229 wake_up_interruptible(&pipe->wait);
60230 ret = -ERESTARTSYS;
60231 goto err;
60232
60233 err_wr:
60234 - if (!--pipe->writers)
60235 + if (atomic_dec_and_test(&pipe->writers))
60236 wake_up_interruptible(&pipe->wait);
60237 ret = -ERESTARTSYS;
60238 goto err;
60239 diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
60240 index 15af622..0e9f4467 100644
60241 --- a/fs/proc/Kconfig
60242 +++ b/fs/proc/Kconfig
60243 @@ -30,12 +30,12 @@ config PROC_FS
60244
60245 config PROC_KCORE
60246 bool "/proc/kcore support" if !ARM
60247 - depends on PROC_FS && MMU
60248 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
60249
60250 config PROC_VMCORE
60251 bool "/proc/vmcore support"
60252 - depends on PROC_FS && CRASH_DUMP
60253 - default y
60254 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
60255 + default n
60256 help
60257 Exports the dump image of crashed kernel in ELF format.
60258
60259 @@ -59,8 +59,8 @@ config PROC_SYSCTL
60260 limited in memory.
60261
60262 config PROC_PAGE_MONITOR
60263 - default y
60264 - depends on PROC_FS && MMU
60265 + default n
60266 + depends on PROC_FS && MMU && !GRKERNSEC
60267 bool "Enable /proc page monitoring" if EXPERT
60268 help
60269 Various /proc files exist to monitor process memory utilization:
60270 diff --git a/fs/proc/array.c b/fs/proc/array.c
60271 index cbd0f1b..adec3f0 100644
60272 --- a/fs/proc/array.c
60273 +++ b/fs/proc/array.c
60274 @@ -60,6 +60,7 @@
60275 #include <linux/tty.h>
60276 #include <linux/string.h>
60277 #include <linux/mman.h>
60278 +#include <linux/grsecurity.h>
60279 #include <linux/proc_fs.h>
60280 #include <linux/ioport.h>
60281 #include <linux/uaccess.h>
60282 @@ -363,6 +364,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
60283 seq_putc(m, '\n');
60284 }
60285
60286 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60287 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
60288 +{
60289 + if (p->mm)
60290 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
60291 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
60292 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
60293 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
60294 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
60295 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
60296 + else
60297 + seq_printf(m, "PaX:\t-----\n");
60298 +}
60299 +#endif
60300 +
60301 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
60302 struct pid *pid, struct task_struct *task)
60303 {
60304 @@ -381,9 +397,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
60305 task_cpus_allowed(m, task);
60306 cpuset_task_status_allowed(m, task);
60307 task_context_switch_counts(m, task);
60308 +
60309 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60310 + task_pax(m, task);
60311 +#endif
60312 +
60313 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
60314 + task_grsec_rbac(m, task);
60315 +#endif
60316 +
60317 return 0;
60318 }
60319
60320 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60321 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
60322 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
60323 + _mm->pax_flags & MF_PAX_SEGMEXEC))
60324 +#endif
60325 +
60326 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
60327 struct pid *pid, struct task_struct *task, int whole)
60328 {
60329 @@ -405,6 +436,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
60330 char tcomm[sizeof(task->comm)];
60331 unsigned long flags;
60332
60333 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60334 + if (current->exec_id != m->exec_id) {
60335 + gr_log_badprocpid("stat");
60336 + return 0;
60337 + }
60338 +#endif
60339 +
60340 state = *get_task_state(task);
60341 vsize = eip = esp = 0;
60342 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
60343 @@ -476,6 +514,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
60344 gtime = task_gtime(task);
60345 }
60346
60347 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60348 + if (PAX_RAND_FLAGS(mm)) {
60349 + eip = 0;
60350 + esp = 0;
60351 + wchan = 0;
60352 + }
60353 +#endif
60354 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60355 + wchan = 0;
60356 + eip =0;
60357 + esp =0;
60358 +#endif
60359 +
60360 /* scale priority and nice values from timeslices to -20..20 */
60361 /* to make it look like a "normal" Unix priority/nice value */
60362 priority = task_prio(task);
60363 @@ -512,9 +563,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
60364 seq_put_decimal_ull(m, ' ', vsize);
60365 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
60366 seq_put_decimal_ull(m, ' ', rsslim);
60367 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60368 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
60369 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
60370 + seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
60371 +#else
60372 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
60373 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
60374 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
60375 +#endif
60376 seq_put_decimal_ull(m, ' ', esp);
60377 seq_put_decimal_ull(m, ' ', eip);
60378 /* The signal information here is obsolete.
60379 @@ -536,7 +593,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
60380 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
60381 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
60382
60383 - if (mm && permitted) {
60384 + if (mm && permitted
60385 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60386 + && !PAX_RAND_FLAGS(mm)
60387 +#endif
60388 + ) {
60389 seq_put_decimal_ull(m, ' ', mm->start_data);
60390 seq_put_decimal_ull(m, ' ', mm->end_data);
60391 seq_put_decimal_ull(m, ' ', mm->start_brk);
60392 @@ -574,8 +635,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
60393 struct pid *pid, struct task_struct *task)
60394 {
60395 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
60396 - struct mm_struct *mm = get_task_mm(task);
60397 + struct mm_struct *mm;
60398
60399 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60400 + if (current->exec_id != m->exec_id) {
60401 + gr_log_badprocpid("statm");
60402 + return 0;
60403 + }
60404 +#endif
60405 + mm = get_task_mm(task);
60406 if (mm) {
60407 size = task_statm(mm, &shared, &text, &data, &resident);
60408 mmput(mm);
60409 @@ -598,6 +666,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
60410 return 0;
60411 }
60412
60413 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
60414 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
60415 +{
60416 + return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
60417 +}
60418 +#endif
60419 +
60420 #ifdef CONFIG_CHECKPOINT_RESTORE
60421 static struct pid *
60422 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
60423 diff --git a/fs/proc/base.c b/fs/proc/base.c
60424 index 1485e38..8ad4236 100644
60425 --- a/fs/proc/base.c
60426 +++ b/fs/proc/base.c
60427 @@ -113,6 +113,14 @@ struct pid_entry {
60428 union proc_op op;
60429 };
60430
60431 +struct getdents_callback {
60432 + struct linux_dirent __user * current_dir;
60433 + struct linux_dirent __user * previous;
60434 + struct file * file;
60435 + int count;
60436 + int error;
60437 +};
60438 +
60439 #define NOD(NAME, MODE, IOP, FOP, OP) { \
60440 .name = (NAME), \
60441 .len = sizeof(NAME) - 1, \
60442 @@ -210,6 +218,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
60443 if (!mm->arg_end)
60444 goto out_mm; /* Shh! No looking before we're done */
60445
60446 + if (gr_acl_handle_procpidmem(task))
60447 + goto out_mm;
60448 +
60449 len = mm->arg_end - mm->arg_start;
60450
60451 if (len > PAGE_SIZE)
60452 @@ -237,12 +248,28 @@ out:
60453 return res;
60454 }
60455
60456 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60457 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
60458 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
60459 + _mm->pax_flags & MF_PAX_SEGMEXEC))
60460 +#endif
60461 +
60462 static int proc_pid_auxv(struct task_struct *task, char *buffer)
60463 {
60464 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
60465 int res = PTR_ERR(mm);
60466 if (mm && !IS_ERR(mm)) {
60467 unsigned int nwords = 0;
60468 +
60469 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60470 + /* allow if we're currently ptracing this task */
60471 + if (PAX_RAND_FLAGS(mm) &&
60472 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
60473 + mmput(mm);
60474 + return 0;
60475 + }
60476 +#endif
60477 +
60478 do {
60479 nwords += 2;
60480 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
60481 @@ -256,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
60482 }
60483
60484
60485 -#ifdef CONFIG_KALLSYMS
60486 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60487 /*
60488 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
60489 * Returns the resolved symbol. If that fails, simply return the address.
60490 @@ -295,7 +322,7 @@ static void unlock_trace(struct task_struct *task)
60491 mutex_unlock(&task->signal->cred_guard_mutex);
60492 }
60493
60494 -#ifdef CONFIG_STACKTRACE
60495 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60496
60497 #define MAX_STACK_TRACE_DEPTH 64
60498
60499 @@ -518,7 +545,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
60500 return count;
60501 }
60502
60503 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
60504 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
60505 static int proc_pid_syscall(struct task_struct *task, char *buffer)
60506 {
60507 long nr;
60508 @@ -547,7 +574,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
60509 /************************************************************************/
60510
60511 /* permission checks */
60512 -static int proc_fd_access_allowed(struct inode *inode)
60513 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
60514 {
60515 struct task_struct *task;
60516 int allowed = 0;
60517 @@ -557,7 +584,10 @@ static int proc_fd_access_allowed(struct inode *inode)
60518 */
60519 task = get_proc_task(inode);
60520 if (task) {
60521 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
60522 + if (log)
60523 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
60524 + else
60525 + allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
60526 put_task_struct(task);
60527 }
60528 return allowed;
60529 @@ -588,10 +618,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
60530 struct task_struct *task,
60531 int hide_pid_min)
60532 {
60533 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
60534 + return false;
60535 +
60536 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60537 + rcu_read_lock();
60538 + {
60539 + const struct cred *tmpcred = current_cred();
60540 + const struct cred *cred = __task_cred(task);
60541 +
60542 + if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
60543 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
60544 + || in_group_p(grsec_proc_gid)
60545 +#endif
60546 + ) {
60547 + rcu_read_unlock();
60548 + return true;
60549 + }
60550 + }
60551 + rcu_read_unlock();
60552 +
60553 + if (!pid->hide_pid)
60554 + return false;
60555 +#endif
60556 +
60557 if (pid->hide_pid < hide_pid_min)
60558 return true;
60559 if (in_group_p(pid->pid_gid))
60560 return true;
60561 +
60562 return ptrace_may_access(task, PTRACE_MODE_READ);
60563 }
60564
60565 @@ -609,7 +664,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
60566 put_task_struct(task);
60567
60568 if (!has_perms) {
60569 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60570 + {
60571 +#else
60572 if (pid->hide_pid == 2) {
60573 +#endif
60574 /*
60575 * Let's make getdents(), stat(), and open()
60576 * consistent with each other. If a process
60577 @@ -707,6 +766,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
60578 if (!task)
60579 return -ESRCH;
60580
60581 + if (gr_acl_handle_procpidmem(task)) {
60582 + put_task_struct(task);
60583 + return -EPERM;
60584 + }
60585 +
60586 mm = mm_access(task, mode);
60587 put_task_struct(task);
60588
60589 @@ -722,6 +786,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
60590
60591 file->private_data = mm;
60592
60593 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60594 + file->f_version = current->exec_id;
60595 +#endif
60596 +
60597 return 0;
60598 }
60599
60600 @@ -743,6 +811,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
60601 ssize_t copied;
60602 char *page;
60603
60604 +#ifdef CONFIG_GRKERNSEC
60605 + if (write)
60606 + return -EPERM;
60607 +#endif
60608 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60609 + if (file->f_version != current->exec_id) {
60610 + gr_log_badprocpid("mem");
60611 + return 0;
60612 + }
60613 +#endif
60614 +
60615 if (!mm)
60616 return 0;
60617
60618 @@ -755,7 +834,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
60619 goto free;
60620
60621 while (count > 0) {
60622 - int this_len = min_t(int, count, PAGE_SIZE);
60623 + ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
60624
60625 if (write && copy_from_user(page, buf, this_len)) {
60626 copied = -EFAULT;
60627 @@ -847,6 +926,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
60628 if (!mm)
60629 return 0;
60630
60631 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60632 + if (file->f_version != current->exec_id) {
60633 + gr_log_badprocpid("environ");
60634 + return 0;
60635 + }
60636 +#endif
60637 +
60638 page = (char *)__get_free_page(GFP_TEMPORARY);
60639 if (!page)
60640 return -ENOMEM;
60641 @@ -856,7 +942,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
60642 goto free;
60643 while (count > 0) {
60644 size_t this_len, max_len;
60645 - int retval;
60646 + ssize_t retval;
60647
60648 if (src >= (mm->env_end - mm->env_start))
60649 break;
60650 @@ -1461,7 +1547,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
60651 int error = -EACCES;
60652
60653 /* Are we allowed to snoop on the tasks file descriptors? */
60654 - if (!proc_fd_access_allowed(inode))
60655 + if (!proc_fd_access_allowed(inode, 0))
60656 goto out;
60657
60658 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
60659 @@ -1505,8 +1591,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
60660 struct path path;
60661
60662 /* Are we allowed to snoop on the tasks file descriptors? */
60663 - if (!proc_fd_access_allowed(inode))
60664 - goto out;
60665 + /* logging this is needed for learning on chromium to work properly,
60666 + but we don't want to flood the logs from 'ps' which does a readlink
60667 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
60668 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
60669 + */
60670 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
60671 + if (!proc_fd_access_allowed(inode,0))
60672 + goto out;
60673 + } else {
60674 + if (!proc_fd_access_allowed(inode,1))
60675 + goto out;
60676 + }
60677
60678 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
60679 if (error)
60680 @@ -1556,7 +1652,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
60681 rcu_read_lock();
60682 cred = __task_cred(task);
60683 inode->i_uid = cred->euid;
60684 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
60685 + inode->i_gid = grsec_proc_gid;
60686 +#else
60687 inode->i_gid = cred->egid;
60688 +#endif
60689 rcu_read_unlock();
60690 }
60691 security_task_to_inode(task, inode);
60692 @@ -1592,10 +1692,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
60693 return -ENOENT;
60694 }
60695 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
60696 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60697 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
60698 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60699 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
60700 +#endif
60701 task_dumpable(task)) {
60702 cred = __task_cred(task);
60703 stat->uid = cred->euid;
60704 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
60705 + stat->gid = grsec_proc_gid;
60706 +#else
60707 stat->gid = cred->egid;
60708 +#endif
60709 }
60710 }
60711 rcu_read_unlock();
60712 @@ -1633,11 +1742,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
60713
60714 if (task) {
60715 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
60716 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60717 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
60718 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60719 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
60720 +#endif
60721 task_dumpable(task)) {
60722 rcu_read_lock();
60723 cred = __task_cred(task);
60724 inode->i_uid = cred->euid;
60725 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
60726 + inode->i_gid = grsec_proc_gid;
60727 +#else
60728 inode->i_gid = cred->egid;
60729 +#endif
60730 rcu_read_unlock();
60731 } else {
60732 inode->i_uid = GLOBAL_ROOT_UID;
60733 @@ -2166,6 +2284,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
60734 if (!task)
60735 goto out_no_task;
60736
60737 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
60738 + goto out;
60739 +
60740 /*
60741 * Yes, it does not scale. And it should not. Don't add
60742 * new entries into /proc/<tgid>/ without very good reasons.
60743 @@ -2196,6 +2317,9 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
60744 if (!task)
60745 return -ENOENT;
60746
60747 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
60748 + goto out;
60749 +
60750 if (!dir_emit_dots(file, ctx))
60751 goto out;
60752
60753 @@ -2585,7 +2709,7 @@ static const struct pid_entry tgid_base_stuff[] = {
60754 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
60755 #endif
60756 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
60757 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
60758 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
60759 INF("syscall", S_IRUGO, proc_pid_syscall),
60760 #endif
60761 INF("cmdline", S_IRUGO, proc_pid_cmdline),
60762 @@ -2610,10 +2734,10 @@ static const struct pid_entry tgid_base_stuff[] = {
60763 #ifdef CONFIG_SECURITY
60764 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
60765 #endif
60766 -#ifdef CONFIG_KALLSYMS
60767 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60768 INF("wchan", S_IRUGO, proc_pid_wchan),
60769 #endif
60770 -#ifdef CONFIG_STACKTRACE
60771 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60772 ONE("stack", S_IRUGO, proc_pid_stack),
60773 #endif
60774 #ifdef CONFIG_SCHEDSTATS
60775 @@ -2647,6 +2771,9 @@ static const struct pid_entry tgid_base_stuff[] = {
60776 #ifdef CONFIG_HARDWALL
60777 INF("hardwall", S_IRUGO, proc_pid_hardwall),
60778 #endif
60779 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
60780 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
60781 +#endif
60782 #ifdef CONFIG_USER_NS
60783 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
60784 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
60785 @@ -2777,7 +2904,14 @@ static int proc_pid_instantiate(struct inode *dir,
60786 if (!inode)
60787 goto out;
60788
60789 +#ifdef CONFIG_GRKERNSEC_PROC_USER
60790 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
60791 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60792 + inode->i_gid = grsec_proc_gid;
60793 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
60794 +#else
60795 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
60796 +#endif
60797 inode->i_op = &proc_tgid_base_inode_operations;
60798 inode->i_fop = &proc_tgid_base_operations;
60799 inode->i_flags|=S_IMMUTABLE;
60800 @@ -2815,7 +2949,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
60801 if (!task)
60802 goto out;
60803
60804 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
60805 + goto out_put_task;
60806 +
60807 result = proc_pid_instantiate(dir, dentry, task, NULL);
60808 +out_put_task:
60809 put_task_struct(task);
60810 out:
60811 return ERR_PTR(result);
60812 @@ -2921,7 +3059,7 @@ static const struct pid_entry tid_base_stuff[] = {
60813 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
60814 #endif
60815 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
60816 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
60817 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
60818 INF("syscall", S_IRUGO, proc_pid_syscall),
60819 #endif
60820 INF("cmdline", S_IRUGO, proc_pid_cmdline),
60821 @@ -2948,10 +3086,10 @@ static const struct pid_entry tid_base_stuff[] = {
60822 #ifdef CONFIG_SECURITY
60823 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
60824 #endif
60825 -#ifdef CONFIG_KALLSYMS
60826 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60827 INF("wchan", S_IRUGO, proc_pid_wchan),
60828 #endif
60829 -#ifdef CONFIG_STACKTRACE
60830 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60831 ONE("stack", S_IRUGO, proc_pid_stack),
60832 #endif
60833 #ifdef CONFIG_SCHEDSTATS
60834 diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
60835 index 82676e3..5f8518a 100644
60836 --- a/fs/proc/cmdline.c
60837 +++ b/fs/proc/cmdline.c
60838 @@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
60839
60840 static int __init proc_cmdline_init(void)
60841 {
60842 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
60843 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
60844 +#else
60845 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
60846 +#endif
60847 return 0;
60848 }
60849 module_init(proc_cmdline_init);
60850 diff --git a/fs/proc/devices.c b/fs/proc/devices.c
60851 index b143471..bb105e5 100644
60852 --- a/fs/proc/devices.c
60853 +++ b/fs/proc/devices.c
60854 @@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
60855
60856 static int __init proc_devices_init(void)
60857 {
60858 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
60859 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
60860 +#else
60861 proc_create("devices", 0, NULL, &proc_devinfo_operations);
60862 +#endif
60863 return 0;
60864 }
60865 module_init(proc_devices_init);
60866 diff --git a/fs/proc/fd.c b/fs/proc/fd.c
60867 index 985ea88..d118a0a 100644
60868 --- a/fs/proc/fd.c
60869 +++ b/fs/proc/fd.c
60870 @@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
60871 if (!task)
60872 return -ENOENT;
60873
60874 - files = get_files_struct(task);
60875 + if (!gr_acl_handle_procpidmem(task))
60876 + files = get_files_struct(task);
60877 put_task_struct(task);
60878
60879 if (files) {
60880 @@ -283,11 +284,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
60881 */
60882 int proc_fd_permission(struct inode *inode, int mask)
60883 {
60884 + struct task_struct *task;
60885 int rv = generic_permission(inode, mask);
60886 - if (rv == 0)
60887 - return 0;
60888 +
60889 if (task_tgid(current) == proc_pid(inode))
60890 rv = 0;
60891 +
60892 + task = get_proc_task(inode);
60893 + if (task == NULL)
60894 + return rv;
60895 +
60896 + if (gr_acl_handle_procpidmem(task))
60897 + rv = -EACCES;
60898 +
60899 + put_task_struct(task);
60900 +
60901 return rv;
60902 }
60903
60904 diff --git a/fs/proc/inode.c b/fs/proc/inode.c
60905 index 8eaa1ba..cc6ff42 100644
60906 --- a/fs/proc/inode.c
60907 +++ b/fs/proc/inode.c
60908 @@ -23,11 +23,17 @@
60909 #include <linux/slab.h>
60910 #include <linux/mount.h>
60911 #include <linux/magic.h>
60912 +#include <linux/grsecurity.h>
60913
60914 #include <asm/uaccess.h>
60915
60916 #include "internal.h"
60917
60918 +#ifdef CONFIG_PROC_SYSCTL
60919 +extern const struct inode_operations proc_sys_inode_operations;
60920 +extern const struct inode_operations proc_sys_dir_operations;
60921 +#endif
60922 +
60923 static void proc_evict_inode(struct inode *inode)
60924 {
60925 struct proc_dir_entry *de;
60926 @@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
60927 ns = PROC_I(inode)->ns.ns;
60928 if (ns_ops && ns)
60929 ns_ops->put(ns);
60930 +
60931 +#ifdef CONFIG_PROC_SYSCTL
60932 + if (inode->i_op == &proc_sys_inode_operations ||
60933 + inode->i_op == &proc_sys_dir_operations)
60934 + gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
60935 +#endif
60936 +
60937 }
60938
60939 static struct kmem_cache * proc_inode_cachep;
60940 @@ -405,7 +418,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
60941 if (de->mode) {
60942 inode->i_mode = de->mode;
60943 inode->i_uid = de->uid;
60944 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
60945 + inode->i_gid = grsec_proc_gid;
60946 +#else
60947 inode->i_gid = de->gid;
60948 +#endif
60949 }
60950 if (de->size)
60951 inode->i_size = de->size;
60952 diff --git a/fs/proc/internal.h b/fs/proc/internal.h
60953 index 651d09a..60c73ae 100644
60954 --- a/fs/proc/internal.h
60955 +++ b/fs/proc/internal.h
60956 @@ -155,6 +155,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
60957 struct pid *, struct task_struct *);
60958 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
60959 struct pid *, struct task_struct *);
60960 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
60961 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
60962 +#endif
60963
60964 /*
60965 * base.c
60966 diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
60967 index 06ea155..9a798c7 100644
60968 --- a/fs/proc/kcore.c
60969 +++ b/fs/proc/kcore.c
60970 @@ -484,9 +484,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
60971 * the addresses in the elf_phdr on our list.
60972 */
60973 start = kc_offset_to_vaddr(*fpos - elf_buflen);
60974 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
60975 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
60976 + if (tsz > buflen)
60977 tsz = buflen;
60978 -
60979 +
60980 while (buflen) {
60981 struct kcore_list *m;
60982
60983 @@ -515,20 +516,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
60984 kfree(elf_buf);
60985 } else {
60986 if (kern_addr_valid(start)) {
60987 - unsigned long n;
60988 + char *elf_buf;
60989 + mm_segment_t oldfs;
60990
60991 - n = copy_to_user(buffer, (char *)start, tsz);
60992 - /*
60993 - * We cannot distinguish between fault on source
60994 - * and fault on destination. When this happens
60995 - * we clear too and hope it will trigger the
60996 - * EFAULT again.
60997 - */
60998 - if (n) {
60999 - if (clear_user(buffer + tsz - n,
61000 - n))
61001 + elf_buf = kmalloc(tsz, GFP_KERNEL);
61002 + if (!elf_buf)
61003 + return -ENOMEM;
61004 + oldfs = get_fs();
61005 + set_fs(KERNEL_DS);
61006 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
61007 + set_fs(oldfs);
61008 + if (copy_to_user(buffer, elf_buf, tsz)) {
61009 + kfree(elf_buf);
61010 return -EFAULT;
61011 + }
61012 }
61013 + set_fs(oldfs);
61014 + kfree(elf_buf);
61015 } else {
61016 if (clear_user(buffer, tsz))
61017 return -EFAULT;
61018 @@ -548,6 +552,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
61019
61020 static int open_kcore(struct inode *inode, struct file *filp)
61021 {
61022 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
61023 + return -EPERM;
61024 +#endif
61025 if (!capable(CAP_SYS_RAWIO))
61026 return -EPERM;
61027 if (kcore_need_update)
61028 diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
61029 index 59d85d6..ac6fc05 100644
61030 --- a/fs/proc/meminfo.c
61031 +++ b/fs/proc/meminfo.c
61032 @@ -153,7 +153,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
61033 vmi.used >> 10,
61034 vmi.largest_chunk >> 10
61035 #ifdef CONFIG_MEMORY_FAILURE
61036 - ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
61037 + ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
61038 #endif
61039 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
61040 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
61041 diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
61042 index ccfd99b..1b7e255 100644
61043 --- a/fs/proc/nommu.c
61044 +++ b/fs/proc/nommu.c
61045 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
61046 if (len < 1)
61047 len = 1;
61048 seq_printf(m, "%*c", len, ' ');
61049 - seq_path(m, &file->f_path, "");
61050 + seq_path(m, &file->f_path, "\n\\");
61051 }
61052
61053 seq_putc(m, '\n');
61054 diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
61055 index 4677bb7..408e936 100644
61056 --- a/fs/proc/proc_net.c
61057 +++ b/fs/proc/proc_net.c
61058 @@ -23,6 +23,7 @@
61059 #include <linux/nsproxy.h>
61060 #include <net/net_namespace.h>
61061 #include <linux/seq_file.h>
61062 +#include <linux/grsecurity.h>
61063
61064 #include "internal.h"
61065
61066 @@ -109,6 +110,17 @@ static struct net *get_proc_task_net(struct inode *dir)
61067 struct task_struct *task;
61068 struct nsproxy *ns;
61069 struct net *net = NULL;
61070 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61071 + const struct cred *cred = current_cred();
61072 +#endif
61073 +
61074 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61075 + if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
61076 + return net;
61077 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61078 + if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
61079 + return net;
61080 +#endif
61081
61082 rcu_read_lock();
61083 task = pid_task(proc_pid(dir), PIDTYPE_PID);
61084 diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
61085 index 7129046..6914844 100644
61086 --- a/fs/proc/proc_sysctl.c
61087 +++ b/fs/proc/proc_sysctl.c
61088 @@ -11,13 +11,21 @@
61089 #include <linux/namei.h>
61090 #include <linux/mm.h>
61091 #include <linux/module.h>
61092 +#include <linux/nsproxy.h>
61093 +#ifdef CONFIG_GRKERNSEC
61094 +#include <net/net_namespace.h>
61095 +#endif
61096 #include "internal.h"
61097
61098 +extern int gr_handle_chroot_sysctl(const int op);
61099 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
61100 + const int op);
61101 +
61102 static const struct dentry_operations proc_sys_dentry_operations;
61103 static const struct file_operations proc_sys_file_operations;
61104 -static const struct inode_operations proc_sys_inode_operations;
61105 +const struct inode_operations proc_sys_inode_operations;
61106 static const struct file_operations proc_sys_dir_file_operations;
61107 -static const struct inode_operations proc_sys_dir_operations;
61108 +const struct inode_operations proc_sys_dir_operations;
61109
61110 void proc_sys_poll_notify(struct ctl_table_poll *poll)
61111 {
61112 @@ -467,6 +475,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
61113
61114 err = NULL;
61115 d_set_d_op(dentry, &proc_sys_dentry_operations);
61116 +
61117 + gr_handle_proc_create(dentry, inode);
61118 +
61119 d_add(dentry, inode);
61120
61121 out:
61122 @@ -482,6 +493,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
61123 struct inode *inode = file_inode(filp);
61124 struct ctl_table_header *head = grab_header(inode);
61125 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
61126 + int op = write ? MAY_WRITE : MAY_READ;
61127 ssize_t error;
61128 size_t res;
61129
61130 @@ -493,7 +505,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
61131 * and won't be until we finish.
61132 */
61133 error = -EPERM;
61134 - if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
61135 + if (sysctl_perm(head, table, op))
61136 goto out;
61137
61138 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
61139 @@ -501,6 +513,27 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
61140 if (!table->proc_handler)
61141 goto out;
61142
61143 +#ifdef CONFIG_GRKERNSEC
61144 + error = -EPERM;
61145 + if (gr_handle_chroot_sysctl(op))
61146 + goto out;
61147 + dget(filp->f_path.dentry);
61148 + if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
61149 + dput(filp->f_path.dentry);
61150 + goto out;
61151 + }
61152 + dput(filp->f_path.dentry);
61153 + if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
61154 + goto out;
61155 + if (write) {
61156 + if (current->nsproxy->net_ns != table->extra2) {
61157 + if (!capable(CAP_SYS_ADMIN))
61158 + goto out;
61159 + } else if (!ns_capable(current->nsproxy->net_ns->user_ns, CAP_NET_ADMIN))
61160 + goto out;
61161 + }
61162 +#endif
61163 +
61164 /* careful: calling conventions are nasty here */
61165 res = count;
61166 error = table->proc_handler(table, write, buf, &res, ppos);
61167 @@ -598,6 +631,9 @@ static bool proc_sys_fill_cache(struct file *file,
61168 return false;
61169 } else {
61170 d_set_d_op(child, &proc_sys_dentry_operations);
61171 +
61172 + gr_handle_proc_create(child, inode);
61173 +
61174 d_add(child, inode);
61175 }
61176 } else {
61177 @@ -641,6 +677,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
61178 if ((*pos)++ < ctx->pos)
61179 return true;
61180
61181 + if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
61182 + return 0;
61183 +
61184 if (unlikely(S_ISLNK(table->mode)))
61185 res = proc_sys_link_fill_cache(file, ctx, head, table);
61186 else
61187 @@ -734,6 +773,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
61188 if (IS_ERR(head))
61189 return PTR_ERR(head);
61190
61191 + if (table && !gr_acl_handle_hidden_file(dentry, mnt))
61192 + return -ENOENT;
61193 +
61194 generic_fillattr(inode, stat);
61195 if (table)
61196 stat->mode = (stat->mode & S_IFMT) | table->mode;
61197 @@ -756,13 +798,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
61198 .llseek = generic_file_llseek,
61199 };
61200
61201 -static const struct inode_operations proc_sys_inode_operations = {
61202 +const struct inode_operations proc_sys_inode_operations = {
61203 .permission = proc_sys_permission,
61204 .setattr = proc_sys_setattr,
61205 .getattr = proc_sys_getattr,
61206 };
61207
61208 -static const struct inode_operations proc_sys_dir_operations = {
61209 +const struct inode_operations proc_sys_dir_operations = {
61210 .lookup = proc_sys_lookup,
61211 .permission = proc_sys_permission,
61212 .setattr = proc_sys_setattr,
61213 @@ -839,7 +881,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
61214 static struct ctl_dir *new_dir(struct ctl_table_set *set,
61215 const char *name, int namelen)
61216 {
61217 - struct ctl_table *table;
61218 + ctl_table_no_const *table;
61219 struct ctl_dir *new;
61220 struct ctl_node *node;
61221 char *new_name;
61222 @@ -851,7 +893,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
61223 return NULL;
61224
61225 node = (struct ctl_node *)(new + 1);
61226 - table = (struct ctl_table *)(node + 1);
61227 + table = (ctl_table_no_const *)(node + 1);
61228 new_name = (char *)(table + 2);
61229 memcpy(new_name, name, namelen);
61230 new_name[namelen] = '\0';
61231 @@ -1020,7 +1062,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
61232 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
61233 struct ctl_table_root *link_root)
61234 {
61235 - struct ctl_table *link_table, *entry, *link;
61236 + ctl_table_no_const *link_table, *link;
61237 + struct ctl_table *entry;
61238 struct ctl_table_header *links;
61239 struct ctl_node *node;
61240 char *link_name;
61241 @@ -1043,7 +1086,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
61242 return NULL;
61243
61244 node = (struct ctl_node *)(links + 1);
61245 - link_table = (struct ctl_table *)(node + nr_entries);
61246 + link_table = (ctl_table_no_const *)(node + nr_entries);
61247 link_name = (char *)&link_table[nr_entries + 1];
61248
61249 for (link = link_table, entry = table; entry->procname; link++, entry++) {
61250 @@ -1291,8 +1334,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
61251 struct ctl_table_header ***subheader, struct ctl_table_set *set,
61252 struct ctl_table *table)
61253 {
61254 - struct ctl_table *ctl_table_arg = NULL;
61255 - struct ctl_table *entry, *files;
61256 + ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
61257 + struct ctl_table *entry;
61258 int nr_files = 0;
61259 int nr_dirs = 0;
61260 int err = -ENOMEM;
61261 @@ -1304,10 +1347,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
61262 nr_files++;
61263 }
61264
61265 - files = table;
61266 /* If there are mixed files and directories we need a new table */
61267 if (nr_dirs && nr_files) {
61268 - struct ctl_table *new;
61269 + ctl_table_no_const *new;
61270 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
61271 GFP_KERNEL);
61272 if (!files)
61273 @@ -1325,7 +1367,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
61274 /* Register everything except a directory full of subdirectories */
61275 if (nr_files || !nr_dirs) {
61276 struct ctl_table_header *header;
61277 - header = __register_sysctl_table(set, path, files);
61278 + header = __register_sysctl_table(set, path, files ? files : table);
61279 if (!header) {
61280 kfree(ctl_table_arg);
61281 goto out;
61282 diff --git a/fs/proc/root.c b/fs/proc/root.c
61283 index 87dbcbe..55e1b4d 100644
61284 --- a/fs/proc/root.c
61285 +++ b/fs/proc/root.c
61286 @@ -186,7 +186,15 @@ void __init proc_root_init(void)
61287 #ifdef CONFIG_PROC_DEVICETREE
61288 proc_device_tree_init();
61289 #endif
61290 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
61291 +#ifdef CONFIG_GRKERNSEC_PROC_USER
61292 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
61293 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61294 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
61295 +#endif
61296 +#else
61297 proc_mkdir("bus", NULL);
61298 +#endif
61299 proc_sys_init();
61300 }
61301
61302 diff --git a/fs/proc/self.c b/fs/proc/self.c
61303 index 6b6a993..807cccc 100644
61304 --- a/fs/proc/self.c
61305 +++ b/fs/proc/self.c
61306 @@ -39,7 +39,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
61307 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
61308 void *cookie)
61309 {
61310 - char *s = nd_get_link(nd);
61311 + const char *s = nd_get_link(nd);
61312 if (!IS_ERR(s))
61313 kfree(s);
61314 }
61315 diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
61316 index 390bdab..83c1e8a 100644
61317 --- a/fs/proc/task_mmu.c
61318 +++ b/fs/proc/task_mmu.c
61319 @@ -12,12 +12,19 @@
61320 #include <linux/swap.h>
61321 #include <linux/swapops.h>
61322 #include <linux/mmu_notifier.h>
61323 +#include <linux/grsecurity.h>
61324
61325 #include <asm/elf.h>
61326 #include <asm/uaccess.h>
61327 #include <asm/tlbflush.h>
61328 #include "internal.h"
61329
61330 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61331 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
61332 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
61333 + _mm->pax_flags & MF_PAX_SEGMEXEC))
61334 +#endif
61335 +
61336 void task_mem(struct seq_file *m, struct mm_struct *mm)
61337 {
61338 unsigned long data, text, lib, swap;
61339 @@ -53,8 +60,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
61340 "VmExe:\t%8lu kB\n"
61341 "VmLib:\t%8lu kB\n"
61342 "VmPTE:\t%8lu kB\n"
61343 - "VmSwap:\t%8lu kB\n",
61344 - hiwater_vm << (PAGE_SHIFT-10),
61345 + "VmSwap:\t%8lu kB\n"
61346 +
61347 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
61348 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
61349 +#endif
61350 +
61351 + ,hiwater_vm << (PAGE_SHIFT-10),
61352 total_vm << (PAGE_SHIFT-10),
61353 mm->locked_vm << (PAGE_SHIFT-10),
61354 mm->pinned_vm << (PAGE_SHIFT-10),
61355 @@ -63,7 +75,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
61356 data << (PAGE_SHIFT-10),
61357 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
61358 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
61359 - swap << (PAGE_SHIFT-10));
61360 + swap << (PAGE_SHIFT-10)
61361 +
61362 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
61363 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61364 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
61365 + , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
61366 +#else
61367 + , mm->context.user_cs_base
61368 + , mm->context.user_cs_limit
61369 +#endif
61370 +#endif
61371 +
61372 + );
61373 }
61374
61375 unsigned long task_vsize(struct mm_struct *mm)
61376 @@ -278,13 +302,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
61377 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
61378 }
61379
61380 - /* We don't show the stack guard page in /proc/maps */
61381 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61382 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
61383 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
61384 +#else
61385 start = vma->vm_start;
61386 - if (stack_guard_page_start(vma, start))
61387 - start += PAGE_SIZE;
61388 end = vma->vm_end;
61389 - if (stack_guard_page_end(vma, end))
61390 - end -= PAGE_SIZE;
61391 +#endif
61392
61393 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
61394 start,
61395 @@ -293,7 +317,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
61396 flags & VM_WRITE ? 'w' : '-',
61397 flags & VM_EXEC ? 'x' : '-',
61398 flags & VM_MAYSHARE ? 's' : 'p',
61399 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61400 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
61401 +#else
61402 pgoff,
61403 +#endif
61404 MAJOR(dev), MINOR(dev), ino, &len);
61405
61406 /*
61407 @@ -302,7 +330,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
61408 */
61409 if (file) {
61410 pad_len_spaces(m, len);
61411 - seq_path(m, &file->f_path, "\n");
61412 + seq_path(m, &file->f_path, "\n\\");
61413 goto done;
61414 }
61415
61416 @@ -328,8 +356,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
61417 * Thread stack in /proc/PID/task/TID/maps or
61418 * the main process stack.
61419 */
61420 - if (!is_pid || (vma->vm_start <= mm->start_stack &&
61421 - vma->vm_end >= mm->start_stack)) {
61422 + if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
61423 + (vma->vm_start <= mm->start_stack &&
61424 + vma->vm_end >= mm->start_stack)) {
61425 name = "[stack]";
61426 } else {
61427 /* Thread stack in /proc/PID/maps */
61428 @@ -353,6 +382,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
61429 struct proc_maps_private *priv = m->private;
61430 struct task_struct *task = priv->task;
61431
61432 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61433 + if (current->exec_id != m->exec_id) {
61434 + gr_log_badprocpid("maps");
61435 + return 0;
61436 + }
61437 +#endif
61438 +
61439 show_map_vma(m, vma, is_pid);
61440
61441 if (m->count < m->size) /* vma is copied successfully */
61442 @@ -590,12 +626,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
61443 .private = &mss,
61444 };
61445
61446 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61447 + if (current->exec_id != m->exec_id) {
61448 + gr_log_badprocpid("smaps");
61449 + return 0;
61450 + }
61451 +#endif
61452 memset(&mss, 0, sizeof mss);
61453 - mss.vma = vma;
61454 - /* mmap_sem is held in m_start */
61455 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
61456 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
61457 -
61458 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61459 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
61460 +#endif
61461 + mss.vma = vma;
61462 + /* mmap_sem is held in m_start */
61463 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
61464 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
61465 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61466 + }
61467 +#endif
61468 show_map_vma(m, vma, is_pid);
61469
61470 seq_printf(m,
61471 @@ -613,7 +660,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
61472 "KernelPageSize: %8lu kB\n"
61473 "MMUPageSize: %8lu kB\n"
61474 "Locked: %8lu kB\n",
61475 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61476 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
61477 +#else
61478 (vma->vm_end - vma->vm_start) >> 10,
61479 +#endif
61480 mss.resident >> 10,
61481 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
61482 mss.shared_clean >> 10,
61483 @@ -1390,6 +1441,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
61484 int n;
61485 char buffer[50];
61486
61487 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61488 + if (current->exec_id != m->exec_id) {
61489 + gr_log_badprocpid("numa_maps");
61490 + return 0;
61491 + }
61492 +#endif
61493 +
61494 if (!mm)
61495 return 0;
61496
61497 @@ -1409,11 +1467,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
61498 if (n < 0)
61499 return n;
61500
61501 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61502 + seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
61503 +#else
61504 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
61505 +#endif
61506
61507 if (file) {
61508 seq_printf(m, " file=");
61509 - seq_path(m, &file->f_path, "\n\t= ");
61510 + seq_path(m, &file->f_path, "\n\t\\= ");
61511 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
61512 seq_printf(m, " heap");
61513 } else {
61514 diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
61515 index 56123a6..5a2f6ec 100644
61516 --- a/fs/proc/task_nommu.c
61517 +++ b/fs/proc/task_nommu.c
61518 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
61519 else
61520 bytes += kobjsize(mm);
61521
61522 - if (current->fs && current->fs->users > 1)
61523 + if (current->fs && atomic_read(&current->fs->users) > 1)
61524 sbytes += kobjsize(current->fs);
61525 else
61526 bytes += kobjsize(current->fs);
61527 @@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
61528
61529 if (file) {
61530 pad_len_spaces(m, len);
61531 - seq_path(m, &file->f_path, "");
61532 + seq_path(m, &file->f_path, "\n\\");
61533 } else if (mm) {
61534 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
61535
61536 diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
61537 index 9100d69..f1f9fc9 100644
61538 --- a/fs/proc/vmcore.c
61539 +++ b/fs/proc/vmcore.c
61540 @@ -105,9 +105,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
61541 nr_bytes = count;
61542
61543 /* If pfn is not ram, return zeros for sparse dump files */
61544 - if (pfn_is_ram(pfn) == 0)
61545 - memset(buf, 0, nr_bytes);
61546 - else {
61547 + if (pfn_is_ram(pfn) == 0) {
61548 + if (userbuf) {
61549 + if (clear_user((char __force_user *)buf, nr_bytes))
61550 + return -EFAULT;
61551 + } else
61552 + memset(buf, 0, nr_bytes);
61553 + } else {
61554 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
61555 offset, userbuf);
61556 if (tmp < 0)
61557 @@ -233,7 +237,7 @@ static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
61558 if (*fpos < m->offset + m->size) {
61559 tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
61560 start = m->paddr + *fpos - m->offset;
61561 - tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
61562 + tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, userbuf);
61563 if (tmp < 0)
61564 return tmp;
61565 buflen -= tsz;
61566 diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
61567 index b00fcc9..e0c6381 100644
61568 --- a/fs/qnx6/qnx6.h
61569 +++ b/fs/qnx6/qnx6.h
61570 @@ -74,7 +74,7 @@ enum {
61571 BYTESEX_BE,
61572 };
61573
61574 -static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
61575 +static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
61576 {
61577 if (sbi->s_bytesex == BYTESEX_LE)
61578 return le64_to_cpu((__force __le64)n);
61579 @@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
61580 return (__force __fs64)cpu_to_be64(n);
61581 }
61582
61583 -static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
61584 +static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
61585 {
61586 if (sbi->s_bytesex == BYTESEX_LE)
61587 return le32_to_cpu((__force __le32)n);
61588 diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
61589 index 16e8abb..2dcf914 100644
61590 --- a/fs/quota/netlink.c
61591 +++ b/fs/quota/netlink.c
61592 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
61593 void quota_send_warning(struct kqid qid, dev_t dev,
61594 const char warntype)
61595 {
61596 - static atomic_t seq;
61597 + static atomic_unchecked_t seq;
61598 struct sk_buff *skb;
61599 void *msg_head;
61600 int ret;
61601 @@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
61602 "VFS: Not enough memory to send quota warning.\n");
61603 return;
61604 }
61605 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
61606 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
61607 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
61608 if (!msg_head) {
61609 printk(KERN_ERR
61610 diff --git a/fs/read_write.c b/fs/read_write.c
61611 index e3cd280..a378473 100644
61612 --- a/fs/read_write.c
61613 +++ b/fs/read_write.c
61614 @@ -438,7 +438,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
61615
61616 old_fs = get_fs();
61617 set_fs(get_ds());
61618 - p = (__force const char __user *)buf;
61619 + p = (const char __force_user *)buf;
61620 if (count > MAX_RW_COUNT)
61621 count = MAX_RW_COUNT;
61622 if (file->f_op->write)
61623 diff --git a/fs/readdir.c b/fs/readdir.c
61624 index 93d71e5..6a14be8 100644
61625 --- a/fs/readdir.c
61626 +++ b/fs/readdir.c
61627 @@ -17,6 +17,7 @@
61628 #include <linux/security.h>
61629 #include <linux/syscalls.h>
61630 #include <linux/unistd.h>
61631 +#include <linux/namei.h>
61632
61633 #include <asm/uaccess.h>
61634
61635 @@ -69,6 +70,7 @@ struct old_linux_dirent {
61636 struct readdir_callback {
61637 struct dir_context ctx;
61638 struct old_linux_dirent __user * dirent;
61639 + struct file * file;
61640 int result;
61641 };
61642
61643 @@ -86,6 +88,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
61644 buf->result = -EOVERFLOW;
61645 return -EOVERFLOW;
61646 }
61647 +
61648 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61649 + return 0;
61650 +
61651 buf->result++;
61652 dirent = buf->dirent;
61653 if (!access_ok(VERIFY_WRITE, dirent,
61654 @@ -117,6 +123,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
61655 if (!f.file)
61656 return -EBADF;
61657
61658 + buf.file = f.file;
61659 error = iterate_dir(f.file, &buf.ctx);
61660 if (buf.result)
61661 error = buf.result;
61662 @@ -142,6 +149,7 @@ struct getdents_callback {
61663 struct dir_context ctx;
61664 struct linux_dirent __user * current_dir;
61665 struct linux_dirent __user * previous;
61666 + struct file * file;
61667 int count;
61668 int error;
61669 };
61670 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
61671 buf->error = -EOVERFLOW;
61672 return -EOVERFLOW;
61673 }
61674 +
61675 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61676 + return 0;
61677 +
61678 dirent = buf->previous;
61679 if (dirent) {
61680 if (__put_user(offset, &dirent->d_off))
61681 @@ -208,6 +220,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
61682 if (!f.file)
61683 return -EBADF;
61684
61685 + buf.file = f.file;
61686 error = iterate_dir(f.file, &buf.ctx);
61687 if (error >= 0)
61688 error = buf.error;
61689 @@ -226,6 +239,7 @@ struct getdents_callback64 {
61690 struct dir_context ctx;
61691 struct linux_dirent64 __user * current_dir;
61692 struct linux_dirent64 __user * previous;
61693 + struct file *file;
61694 int count;
61695 int error;
61696 };
61697 @@ -241,6 +255,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
61698 buf->error = -EINVAL; /* only used if we fail.. */
61699 if (reclen > buf->count)
61700 return -EINVAL;
61701 +
61702 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
61703 + return 0;
61704 +
61705 dirent = buf->previous;
61706 if (dirent) {
61707 if (__put_user(offset, &dirent->d_off))
61708 @@ -288,6 +306,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
61709 if (!f.file)
61710 return -EBADF;
61711
61712 + buf.file = f.file;
61713 error = iterate_dir(f.file, &buf.ctx);
61714 if (error >= 0)
61715 error = buf.error;
61716 diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
61717 index 2b7882b..1c5ef48 100644
61718 --- a/fs/reiserfs/do_balan.c
61719 +++ b/fs/reiserfs/do_balan.c
61720 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
61721 return;
61722 }
61723
61724 - atomic_inc(&(fs_generation(tb->tb_sb)));
61725 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
61726 do_balance_starts(tb);
61727
61728 /* balance leaf returns 0 except if combining L R and S into
61729 diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
61730 index a958444..42b2323 100644
61731 --- a/fs/reiserfs/procfs.c
61732 +++ b/fs/reiserfs/procfs.c
61733 @@ -114,7 +114,7 @@ static int show_super(struct seq_file *m, void *unused)
61734 "SMALL_TAILS " : "NO_TAILS ",
61735 replay_only(sb) ? "REPLAY_ONLY " : "",
61736 convert_reiserfs(sb) ? "CONV " : "",
61737 - atomic_read(&r->s_generation_counter),
61738 + atomic_read_unchecked(&r->s_generation_counter),
61739 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
61740 SF(s_do_balance), SF(s_unneeded_left_neighbor),
61741 SF(s_good_search_by_key_reada), SF(s_bmaps),
61742 diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
61743 index f8adaee..0eeeeca 100644
61744 --- a/fs/reiserfs/reiserfs.h
61745 +++ b/fs/reiserfs/reiserfs.h
61746 @@ -453,7 +453,7 @@ struct reiserfs_sb_info {
61747 /* Comment? -Hans */
61748 wait_queue_head_t s_wait;
61749 /* To be obsoleted soon by per buffer seals.. -Hans */
61750 - atomic_t s_generation_counter; // increased by one every time the
61751 + atomic_unchecked_t s_generation_counter; // increased by one every time the
61752 // tree gets re-balanced
61753 unsigned long s_properties; /* File system properties. Currently holds
61754 on-disk FS format */
61755 @@ -1982,7 +1982,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
61756 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
61757
61758 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
61759 -#define get_generation(s) atomic_read (&fs_generation(s))
61760 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
61761 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
61762 #define __fs_changed(gen,s) (gen != get_generation (s))
61763 #define fs_changed(gen,s) \
61764 diff --git a/fs/select.c b/fs/select.c
61765 index dfd5cb1..1754d57 100644
61766 --- a/fs/select.c
61767 +++ b/fs/select.c
61768 @@ -20,6 +20,7 @@
61769 #include <linux/export.h>
61770 #include <linux/slab.h>
61771 #include <linux/poll.h>
61772 +#include <linux/security.h>
61773 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
61774 #include <linux/file.h>
61775 #include <linux/fdtable.h>
61776 @@ -880,6 +881,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
61777 struct poll_list *walk = head;
61778 unsigned long todo = nfds;
61779
61780 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
61781 if (nfds > rlimit(RLIMIT_NOFILE))
61782 return -EINVAL;
61783
61784 diff --git a/fs/seq_file.c b/fs/seq_file.c
61785 index a290157..ec3211a 100644
61786 --- a/fs/seq_file.c
61787 +++ b/fs/seq_file.c
61788 @@ -10,6 +10,7 @@
61789 #include <linux/seq_file.h>
61790 #include <linux/slab.h>
61791 #include <linux/cred.h>
61792 +#include <linux/sched.h>
61793
61794 #include <asm/uaccess.h>
61795 #include <asm/page.h>
61796 @@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
61797 #ifdef CONFIG_USER_NS
61798 p->user_ns = file->f_cred->user_ns;
61799 #endif
61800 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61801 + p->exec_id = current->exec_id;
61802 +#endif
61803
61804 /*
61805 * Wrappers around seq_open(e.g. swaps_open) need to be
61806 @@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
61807 return 0;
61808 }
61809 if (!m->buf) {
61810 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
61811 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
61812 if (!m->buf)
61813 return -ENOMEM;
61814 }
61815 @@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
61816 Eoverflow:
61817 m->op->stop(m, p);
61818 kfree(m->buf);
61819 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
61820 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
61821 return !m->buf ? -ENOMEM : -EAGAIN;
61822 }
61823
61824 @@ -152,7 +156,7 @@ Eoverflow:
61825 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
61826 {
61827 struct seq_file *m = file->private_data;
61828 - size_t copied = 0;
61829 + ssize_t copied = 0;
61830 loff_t pos;
61831 size_t n;
61832 void *p;
61833 @@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
61834
61835 /* grab buffer if we didn't have one */
61836 if (!m->buf) {
61837 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
61838 + m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
61839 if (!m->buf)
61840 goto Enomem;
61841 }
61842 @@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
61843 goto Fill;
61844 m->op->stop(m, p);
61845 kfree(m->buf);
61846 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
61847 + m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
61848 if (!m->buf)
61849 goto Enomem;
61850 m->count = 0;
61851 @@ -583,7 +587,7 @@ static void single_stop(struct seq_file *p, void *v)
61852 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
61853 void *data)
61854 {
61855 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
61856 + seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
61857 int res = -ENOMEM;
61858
61859 if (op) {
61860 diff --git a/fs/splice.c b/fs/splice.c
61861 index 3b7ee65..87fc2e4 100644
61862 --- a/fs/splice.c
61863 +++ b/fs/splice.c
61864 @@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
61865 pipe_lock(pipe);
61866
61867 for (;;) {
61868 - if (!pipe->readers) {
61869 + if (!atomic_read(&pipe->readers)) {
61870 send_sig(SIGPIPE, current, 0);
61871 if (!ret)
61872 ret = -EPIPE;
61873 @@ -219,7 +219,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
61874 page_nr++;
61875 ret += buf->len;
61876
61877 - if (pipe->files)
61878 + if (atomic_read(&pipe->files))
61879 do_wakeup = 1;
61880
61881 if (!--spd->nr_pages)
61882 @@ -250,9 +250,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
61883 do_wakeup = 0;
61884 }
61885
61886 - pipe->waiting_writers++;
61887 + atomic_inc(&pipe->waiting_writers);
61888 pipe_wait(pipe);
61889 - pipe->waiting_writers--;
61890 + atomic_dec(&pipe->waiting_writers);
61891 }
61892
61893 pipe_unlock(pipe);
61894 @@ -565,7 +565,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
61895 old_fs = get_fs();
61896 set_fs(get_ds());
61897 /* The cast to a user pointer is valid due to the set_fs() */
61898 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
61899 + res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
61900 set_fs(old_fs);
61901
61902 return res;
61903 @@ -580,7 +580,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
61904 old_fs = get_fs();
61905 set_fs(get_ds());
61906 /* The cast to a user pointer is valid due to the set_fs() */
61907 - res = vfs_write(file, (__force const char __user *)buf, count, &pos);
61908 + res = vfs_write(file, (const char __force_user *)buf, count, &pos);
61909 set_fs(old_fs);
61910
61911 return res;
61912 @@ -633,7 +633,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
61913 goto err;
61914
61915 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
61916 - vec[i].iov_base = (void __user *) page_address(page);
61917 + vec[i].iov_base = (void __force_user *) page_address(page);
61918 vec[i].iov_len = this_len;
61919 spd.pages[i] = page;
61920 spd.nr_pages++;
61921 @@ -829,7 +829,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
61922 ops->release(pipe, buf);
61923 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
61924 pipe->nrbufs--;
61925 - if (pipe->files)
61926 + if (atomic_read(&pipe->files))
61927 sd->need_wakeup = true;
61928 }
61929
61930 @@ -854,10 +854,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
61931 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
61932 {
61933 while (!pipe->nrbufs) {
61934 - if (!pipe->writers)
61935 + if (!atomic_read(&pipe->writers))
61936 return 0;
61937
61938 - if (!pipe->waiting_writers && sd->num_spliced)
61939 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
61940 return 0;
61941
61942 if (sd->flags & SPLICE_F_NONBLOCK)
61943 @@ -1179,7 +1179,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
61944 * out of the pipe right after the splice_to_pipe(). So set
61945 * PIPE_READERS appropriately.
61946 */
61947 - pipe->readers = 1;
61948 + atomic_set(&pipe->readers, 1);
61949
61950 current->splice_pipe = pipe;
61951 }
61952 @@ -1475,6 +1475,7 @@ static int get_iovec_page_array(const struct iovec __user *iov,
61953
61954 partial[buffers].offset = off;
61955 partial[buffers].len = plen;
61956 + partial[buffers].private = 0;
61957
61958 off = 0;
61959 len -= plen;
61960 @@ -1777,9 +1778,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
61961 ret = -ERESTARTSYS;
61962 break;
61963 }
61964 - if (!pipe->writers)
61965 + if (!atomic_read(&pipe->writers))
61966 break;
61967 - if (!pipe->waiting_writers) {
61968 + if (!atomic_read(&pipe->waiting_writers)) {
61969 if (flags & SPLICE_F_NONBLOCK) {
61970 ret = -EAGAIN;
61971 break;
61972 @@ -1811,7 +1812,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
61973 pipe_lock(pipe);
61974
61975 while (pipe->nrbufs >= pipe->buffers) {
61976 - if (!pipe->readers) {
61977 + if (!atomic_read(&pipe->readers)) {
61978 send_sig(SIGPIPE, current, 0);
61979 ret = -EPIPE;
61980 break;
61981 @@ -1824,9 +1825,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
61982 ret = -ERESTARTSYS;
61983 break;
61984 }
61985 - pipe->waiting_writers++;
61986 + atomic_inc(&pipe->waiting_writers);
61987 pipe_wait(pipe);
61988 - pipe->waiting_writers--;
61989 + atomic_dec(&pipe->waiting_writers);
61990 }
61991
61992 pipe_unlock(pipe);
61993 @@ -1862,14 +1863,14 @@ retry:
61994 pipe_double_lock(ipipe, opipe);
61995
61996 do {
61997 - if (!opipe->readers) {
61998 + if (!atomic_read(&opipe->readers)) {
61999 send_sig(SIGPIPE, current, 0);
62000 if (!ret)
62001 ret = -EPIPE;
62002 break;
62003 }
62004
62005 - if (!ipipe->nrbufs && !ipipe->writers)
62006 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
62007 break;
62008
62009 /*
62010 @@ -1966,7 +1967,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
62011 pipe_double_lock(ipipe, opipe);
62012
62013 do {
62014 - if (!opipe->readers) {
62015 + if (!atomic_read(&opipe->readers)) {
62016 send_sig(SIGPIPE, current, 0);
62017 if (!ret)
62018 ret = -EPIPE;
62019 @@ -2011,7 +2012,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
62020 * return EAGAIN if we have the potential of some data in the
62021 * future, otherwise just return 0
62022 */
62023 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
62024 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
62025 ret = -EAGAIN;
62026
62027 pipe_unlock(ipipe);
62028 diff --git a/fs/stat.c b/fs/stat.c
62029 index ae0c3ce..9ee641c 100644
62030 --- a/fs/stat.c
62031 +++ b/fs/stat.c
62032 @@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
62033 stat->gid = inode->i_gid;
62034 stat->rdev = inode->i_rdev;
62035 stat->size = i_size_read(inode);
62036 - stat->atime = inode->i_atime;
62037 - stat->mtime = inode->i_mtime;
62038 + if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
62039 + stat->atime = inode->i_ctime;
62040 + stat->mtime = inode->i_ctime;
62041 + } else {
62042 + stat->atime = inode->i_atime;
62043 + stat->mtime = inode->i_mtime;
62044 + }
62045 stat->ctime = inode->i_ctime;
62046 stat->blksize = (1 << inode->i_blkbits);
62047 stat->blocks = inode->i_blocks;
62048 @@ -52,9 +57,16 @@ EXPORT_SYMBOL(generic_fillattr);
62049 int vfs_getattr_nosec(struct path *path, struct kstat *stat)
62050 {
62051 struct inode *inode = path->dentry->d_inode;
62052 + int retval;
62053
62054 - if (inode->i_op->getattr)
62055 - return inode->i_op->getattr(path->mnt, path->dentry, stat);
62056 + if (inode->i_op->getattr) {
62057 + retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
62058 + if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
62059 + stat->atime = stat->ctime;
62060 + stat->mtime = stat->ctime;
62061 + }
62062 + return retval;
62063 + }
62064
62065 generic_fillattr(inode, stat);
62066 return 0;
62067 diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
62068 index c590cab..6dfd6fc 100644
62069 --- a/fs/sysfs/bin.c
62070 +++ b/fs/sysfs/bin.c
62071 @@ -234,13 +234,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
62072 return ret;
62073 }
62074
62075 -static int bin_access(struct vm_area_struct *vma, unsigned long addr,
62076 - void *buf, int len, int write)
62077 +static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
62078 + void *buf, size_t len, int write)
62079 {
62080 struct file *file = vma->vm_file;
62081 struct bin_buffer *bb = file->private_data;
62082 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
62083 - int ret;
62084 + ssize_t ret;
62085
62086 if (!bb->vm_ops)
62087 return -EINVAL;
62088 diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
62089 index 4d83ced..049dc45 100644
62090 --- a/fs/sysfs/dir.c
62091 +++ b/fs/sysfs/dir.c
62092 @@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
62093 *
62094 * Returns 31 bit hash of ns + name (so it fits in an off_t )
62095 */
62096 -static unsigned int sysfs_name_hash(const void *ns, const char *name)
62097 +static unsigned int sysfs_name_hash(const void *ns, const unsigned char *name)
62098 {
62099 unsigned long hash = init_name_hash();
62100 unsigned int len = strlen(name);
62101 @@ -675,6 +675,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
62102 struct sysfs_dirent *sd;
62103 int rc;
62104
62105 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
62106 + const char *parent_name = parent_sd->s_name;
62107 +
62108 + mode = S_IFDIR | S_IRWXU;
62109 +
62110 + if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
62111 + (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
62112 + (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
62113 + (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
62114 + mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
62115 +#endif
62116 +
62117 /* allocate */
62118 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
62119 if (!sd)
62120 diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
62121 index 15ef5eb..e474372 100644
62122 --- a/fs/sysfs/file.c
62123 +++ b/fs/sysfs/file.c
62124 @@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
62125
62126 struct sysfs_open_dirent {
62127 atomic_t refcnt;
62128 - atomic_t event;
62129 + atomic_unchecked_t event;
62130 wait_queue_head_t poll;
62131 struct list_head buffers; /* goes through sysfs_buffer.list */
62132 };
62133 @@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry *dentry, struct sysfs_buffer *buffer)
62134 if (!sysfs_get_active(attr_sd))
62135 return -ENODEV;
62136
62137 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
62138 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
62139 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
62140
62141 sysfs_put_active(attr_sd);
62142 @@ -284,7 +284,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
62143 return -ENOMEM;
62144
62145 atomic_set(&new_od->refcnt, 0);
62146 - atomic_set(&new_od->event, 1);
62147 + atomic_set_unchecked(&new_od->event, 1);
62148 init_waitqueue_head(&new_od->poll);
62149 INIT_LIST_HEAD(&new_od->buffers);
62150 goto retry;
62151 @@ -430,7 +430,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
62152
62153 sysfs_put_active(attr_sd);
62154
62155 - if (buffer->event != atomic_read(&od->event))
62156 + if (buffer->event != atomic_read_unchecked(&od->event))
62157 goto trigger;
62158
62159 return DEFAULT_POLLMASK;
62160 @@ -450,7 +450,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
62161 if (!WARN_ON(sysfs_type(sd) != SYSFS_KOBJ_ATTR)) {
62162 od = sd->s_attr.open;
62163 if (od) {
62164 - atomic_inc(&od->event);
62165 + atomic_inc_unchecked(&od->event);
62166 wake_up_interruptible(&od->poll);
62167 }
62168 }
62169 diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
62170 index 2dd4507..62a215a 100644
62171 --- a/fs/sysfs/symlink.c
62172 +++ b/fs/sysfs/symlink.c
62173 @@ -308,7 +308,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
62174 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd,
62175 void *cookie)
62176 {
62177 - char *page = nd_get_link(nd);
62178 + const char *page = nd_get_link(nd);
62179 if (!IS_ERR(page))
62180 free_page((unsigned long)page);
62181 }
62182 diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
62183 index 69d4889..a810bd4 100644
62184 --- a/fs/sysv/sysv.h
62185 +++ b/fs/sysv/sysv.h
62186 @@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
62187 #endif
62188 }
62189
62190 -static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
62191 +static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
62192 {
62193 if (sbi->s_bytesex == BYTESEX_PDP)
62194 return PDP_swab((__force __u32)n);
62195 diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
62196 index e18b988..f1d4ad0f 100644
62197 --- a/fs/ubifs/io.c
62198 +++ b/fs/ubifs/io.c
62199 @@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
62200 return err;
62201 }
62202
62203 -int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
62204 +int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
62205 {
62206 int err;
62207
62208 diff --git a/fs/udf/misc.c b/fs/udf/misc.c
62209 index c175b4d..8f36a16 100644
62210 --- a/fs/udf/misc.c
62211 +++ b/fs/udf/misc.c
62212 @@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
62213
62214 u8 udf_tag_checksum(const struct tag *t)
62215 {
62216 - u8 *data = (u8 *)t;
62217 + const u8 *data = (const u8 *)t;
62218 u8 checksum = 0;
62219 int i;
62220 for (i = 0; i < sizeof(struct tag); ++i)
62221 diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
62222 index 8d974c4..b82f6ec 100644
62223 --- a/fs/ufs/swab.h
62224 +++ b/fs/ufs/swab.h
62225 @@ -22,7 +22,7 @@ enum {
62226 BYTESEX_BE
62227 };
62228
62229 -static inline u64
62230 +static inline u64 __intentional_overflow(-1)
62231 fs64_to_cpu(struct super_block *sbp, __fs64 n)
62232 {
62233 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
62234 @@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
62235 return (__force __fs64)cpu_to_be64(n);
62236 }
62237
62238 -static inline u32
62239 +static inline u32 __intentional_overflow(-1)
62240 fs32_to_cpu(struct super_block *sbp, __fs32 n)
62241 {
62242 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
62243 diff --git a/fs/utimes.c b/fs/utimes.c
62244 index f4fb7ec..3fe03c0 100644
62245 --- a/fs/utimes.c
62246 +++ b/fs/utimes.c
62247 @@ -1,6 +1,7 @@
62248 #include <linux/compiler.h>
62249 #include <linux/file.h>
62250 #include <linux/fs.h>
62251 +#include <linux/security.h>
62252 #include <linux/linkage.h>
62253 #include <linux/mount.h>
62254 #include <linux/namei.h>
62255 @@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
62256 goto mnt_drop_write_and_out;
62257 }
62258 }
62259 +
62260 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
62261 + error = -EACCES;
62262 + goto mnt_drop_write_and_out;
62263 + }
62264 +
62265 mutex_lock(&inode->i_mutex);
62266 error = notify_change(path->dentry, &newattrs);
62267 mutex_unlock(&inode->i_mutex);
62268 diff --git a/fs/xattr.c b/fs/xattr.c
62269 index 3377dff..f394815 100644
62270 --- a/fs/xattr.c
62271 +++ b/fs/xattr.c
62272 @@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
62273 return rc;
62274 }
62275
62276 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
62277 +ssize_t
62278 +pax_getxattr(struct dentry *dentry, void *value, size_t size)
62279 +{
62280 + struct inode *inode = dentry->d_inode;
62281 + ssize_t error;
62282 +
62283 + error = inode_permission(inode, MAY_EXEC);
62284 + if (error)
62285 + return error;
62286 +
62287 + if (inode->i_op->getxattr)
62288 + error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
62289 + else
62290 + error = -EOPNOTSUPP;
62291 +
62292 + return error;
62293 +}
62294 +EXPORT_SYMBOL(pax_getxattr);
62295 +#endif
62296 +
62297 ssize_t
62298 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
62299 {
62300 @@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
62301 * Extended attribute SET operations
62302 */
62303 static long
62304 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
62305 +setxattr(struct path *path, const char __user *name, const void __user *value,
62306 size_t size, int flags)
62307 {
62308 int error;
62309 @@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
62310 posix_acl_fix_xattr_from_user(kvalue, size);
62311 }
62312
62313 - error = vfs_setxattr(d, kname, kvalue, size, flags);
62314 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
62315 + error = -EACCES;
62316 + goto out;
62317 + }
62318 +
62319 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
62320 out:
62321 if (vvalue)
62322 vfree(vvalue);
62323 @@ -377,7 +403,7 @@ retry:
62324 return error;
62325 error = mnt_want_write(path.mnt);
62326 if (!error) {
62327 - error = setxattr(path.dentry, name, value, size, flags);
62328 + error = setxattr(&path, name, value, size, flags);
62329 mnt_drop_write(path.mnt);
62330 }
62331 path_put(&path);
62332 @@ -401,7 +427,7 @@ retry:
62333 return error;
62334 error = mnt_want_write(path.mnt);
62335 if (!error) {
62336 - error = setxattr(path.dentry, name, value, size, flags);
62337 + error = setxattr(&path, name, value, size, flags);
62338 mnt_drop_write(path.mnt);
62339 }
62340 path_put(&path);
62341 @@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
62342 const void __user *,value, size_t, size, int, flags)
62343 {
62344 struct fd f = fdget(fd);
62345 - struct dentry *dentry;
62346 int error = -EBADF;
62347
62348 if (!f.file)
62349 return error;
62350 - dentry = f.file->f_path.dentry;
62351 - audit_inode(NULL, dentry, 0);
62352 + audit_inode(NULL, f.file->f_path.dentry, 0);
62353 error = mnt_want_write_file(f.file);
62354 if (!error) {
62355 - error = setxattr(dentry, name, value, size, flags);
62356 + error = setxattr(&f.file->f_path, name, value, size, flags);
62357 mnt_drop_write_file(f.file);
62358 }
62359 fdput(f);
62360 @@ -626,7 +650,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
62361 * Extended attribute REMOVE operations
62362 */
62363 static long
62364 -removexattr(struct dentry *d, const char __user *name)
62365 +removexattr(struct path *path, const char __user *name)
62366 {
62367 int error;
62368 char kname[XATTR_NAME_MAX + 1];
62369 @@ -637,7 +661,10 @@ removexattr(struct dentry *d, const char __user *name)
62370 if (error < 0)
62371 return error;
62372
62373 - return vfs_removexattr(d, kname);
62374 + if (!gr_acl_handle_removexattr(path->dentry, path->mnt))
62375 + return -EACCES;
62376 +
62377 + return vfs_removexattr(path->dentry, kname);
62378 }
62379
62380 SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
62381 @@ -652,7 +679,7 @@ retry:
62382 return error;
62383 error = mnt_want_write(path.mnt);
62384 if (!error) {
62385 - error = removexattr(path.dentry, name);
62386 + error = removexattr(&path, name);
62387 mnt_drop_write(path.mnt);
62388 }
62389 path_put(&path);
62390 @@ -675,7 +702,7 @@ retry:
62391 return error;
62392 error = mnt_want_write(path.mnt);
62393 if (!error) {
62394 - error = removexattr(path.dentry, name);
62395 + error = removexattr(&path, name);
62396 mnt_drop_write(path.mnt);
62397 }
62398 path_put(&path);
62399 @@ -689,16 +716,16 @@ retry:
62400 SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
62401 {
62402 struct fd f = fdget(fd);
62403 - struct dentry *dentry;
62404 + struct path *path;
62405 int error = -EBADF;
62406
62407 if (!f.file)
62408 return error;
62409 - dentry = f.file->f_path.dentry;
62410 - audit_inode(NULL, dentry, 0);
62411 + path = &f.file->f_path;
62412 + audit_inode(NULL, path->dentry, 0);
62413 error = mnt_want_write_file(f.file);
62414 if (!error) {
62415 - error = removexattr(dentry, name);
62416 + error = removexattr(path, name);
62417 mnt_drop_write_file(f.file);
62418 }
62419 fdput(f);
62420 diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
62421 index 9fbea87..6b19972 100644
62422 --- a/fs/xattr_acl.c
62423 +++ b/fs/xattr_acl.c
62424 @@ -76,8 +76,8 @@ struct posix_acl *
62425 posix_acl_from_xattr(struct user_namespace *user_ns,
62426 const void *value, size_t size)
62427 {
62428 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
62429 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
62430 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
62431 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
62432 int count;
62433 struct posix_acl *acl;
62434 struct posix_acl_entry *acl_e;
62435 diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
62436 index f47e65c..e7125d9 100644
62437 --- a/fs/xfs/xfs_bmap.c
62438 +++ b/fs/xfs/xfs_bmap.c
62439 @@ -586,7 +586,7 @@ xfs_bmap_validate_ret(
62440
62441 #else
62442 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
62443 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
62444 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
62445 #endif /* DEBUG */
62446
62447 /*
62448 diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
62449 index 8f84153..7ce60d0 100644
62450 --- a/fs/xfs/xfs_dir2_readdir.c
62451 +++ b/fs/xfs/xfs_dir2_readdir.c
62452 @@ -160,7 +160,12 @@ xfs_dir2_sf_getdents(
62453 ino = xfs_dir3_sfe_get_ino(mp, sfp, sfep);
62454 filetype = xfs_dir3_sfe_get_ftype(mp, sfp, sfep);
62455 ctx->pos = off & 0x7fffffff;
62456 - if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
62457 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
62458 + char name[sfep->namelen];
62459 + memcpy(name, sfep->name, sfep->namelen);
62460 + if (!dir_emit(ctx, name, sfep->namelen, ino, xfs_dir3_get_dtype(mp, filetype)))
62461 + return 0;
62462 + } else if (!dir_emit(ctx, (char *)sfep->name, sfep->namelen, ino,
62463 xfs_dir3_get_dtype(mp, filetype)))
62464 return 0;
62465 sfep = xfs_dir3_sf_nextentry(mp, sfp, sfep);
62466 diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
62467 index 8c8ef24..689f742 100644
62468 --- a/fs/xfs/xfs_ioctl.c
62469 +++ b/fs/xfs/xfs_ioctl.c
62470 @@ -127,7 +127,7 @@ xfs_find_handle(
62471 }
62472
62473 error = -EFAULT;
62474 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
62475 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
62476 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
62477 goto out_put;
62478
62479 diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
62480 index 2b8952d..a60c6be 100644
62481 --- a/fs/xfs/xfs_iops.c
62482 +++ b/fs/xfs/xfs_iops.c
62483 @@ -401,7 +401,7 @@ xfs_vn_put_link(
62484 struct nameidata *nd,
62485 void *p)
62486 {
62487 - char *s = nd_get_link(nd);
62488 + const char *s = nd_get_link(nd);
62489
62490 if (!IS_ERR(s))
62491 kfree(s);
62492 diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
62493 new file mode 100644
62494 index 0000000..a78d810
62495 --- /dev/null
62496 +++ b/grsecurity/Kconfig
62497 @@ -0,0 +1,1107 @@
62498 +#
62499 +# grsecurity configuration
62500 +#
62501 +menu "Memory Protections"
62502 +depends on GRKERNSEC
62503 +
62504 +config GRKERNSEC_KMEM
62505 + bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
62506 + default y if GRKERNSEC_CONFIG_AUTO
62507 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
62508 + help
62509 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
62510 + be written to or read from to modify or leak the contents of the running
62511 + kernel. /dev/port will also not be allowed to be opened, and support
62512 + for /dev/cpu/*/msr and kexec will be removed. If you have module
62513 + support disabled, enabling this will close up six ways that are
62514 + currently used to insert malicious code into the running kernel.
62515 +
62516 + Even with this feature enabled, we still highly recommend that
62517 + you use the RBAC system, as it is still possible for an attacker to
62518 + modify the running kernel through other more obscure methods.
62519 +
62520 + Enabling this feature will prevent the "cpupower" and "powertop" tools
62521 + from working.
62522 +
62523 + It is highly recommended that you say Y here if you meet all the
62524 + conditions above.
62525 +
62526 +config GRKERNSEC_VM86
62527 + bool "Restrict VM86 mode"
62528 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
62529 + depends on X86_32
62530 +
62531 + help
62532 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
62533 + make use of a special execution mode on 32bit x86 processors called
62534 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
62535 + video cards and will still work with this option enabled. The purpose
62536 + of the option is to prevent exploitation of emulation errors in
62537 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
62538 + Nearly all users should be able to enable this option.
62539 +
62540 +config GRKERNSEC_IO
62541 + bool "Disable privileged I/O"
62542 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
62543 + depends on X86
62544 + select RTC_CLASS
62545 + select RTC_INTF_DEV
62546 + select RTC_DRV_CMOS
62547 +
62548 + help
62549 + If you say Y here, all ioperm and iopl calls will return an error.
62550 + Ioperm and iopl can be used to modify the running kernel.
62551 + Unfortunately, some programs need this access to operate properly,
62552 + the most notable of which are XFree86 and hwclock. hwclock can be
62553 + remedied by having RTC support in the kernel, so real-time
62554 + clock support is enabled if this option is enabled, to ensure
62555 + that hwclock operates correctly.
62556 +
62557 + If you're using XFree86 or a version of Xorg from 2012 or earlier,
62558 + you may not be able to boot into a graphical environment with this
62559 + option enabled. In this case, you should use the RBAC system instead.
62560 +
62561 +config GRKERNSEC_JIT_HARDEN
62562 + bool "Harden BPF JIT against spray attacks"
62563 + default y if GRKERNSEC_CONFIG_AUTO
62564 + depends on BPF_JIT
62565 + help
62566 + If you say Y here, the native code generated by the kernel's Berkeley
62567 + Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
62568 + attacks that attempt to fit attacker-beneficial instructions in
62569 + 32bit immediate fields of JIT-generated native instructions. The
62570 + attacker will generally aim to cause an unintended instruction sequence
62571 + of JIT-generated native code to execute by jumping into the middle of
62572 + a generated instruction. This feature effectively randomizes the 32bit
62573 + immediate constants present in the generated code to thwart such attacks.
62574 +
62575 + If you're using KERNEXEC, it's recommended that you enable this option
62576 + to supplement the hardening of the kernel.
62577 +
62578 +config GRKERNSEC_PERF_HARDEN
62579 + bool "Disable unprivileged PERF_EVENTS usage by default"
62580 + default y if GRKERNSEC_CONFIG_AUTO
62581 + depends on PERF_EVENTS
62582 + help
62583 + If you say Y here, the range of acceptable values for the
62584 + /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
62585 + default to a new value: 3. When the sysctl is set to this value, no
62586 + unprivileged use of the PERF_EVENTS syscall interface will be permitted.
62587 +
62588 + Though PERF_EVENTS can be used legitimately for performance monitoring
62589 + and low-level application profiling, it is forced on regardless of
62590 + configuration, has been at fault for several vulnerabilities, and
62591 + creates new opportunities for side channels and other information leaks.
62592 +
62593 + This feature puts PERF_EVENTS into a secure default state and permits
62594 + the administrator to change out of it temporarily if unprivileged
62595 + application profiling is needed.
62596 +
62597 +config GRKERNSEC_RAND_THREADSTACK
62598 + bool "Insert random gaps between thread stacks"
62599 + default y if GRKERNSEC_CONFIG_AUTO
62600 + depends on PAX_RANDMMAP && !PPC
62601 + help
62602 + If you say Y here, a random-sized gap will be enforced between allocated
62603 + thread stacks. Glibc's NPTL and other threading libraries that
62604 + pass MAP_STACK to the kernel for thread stack allocation are supported.
62605 + The implementation currently provides 8 bits of entropy for the gap.
62606 +
62607 + Many distributions do not compile threaded remote services with the
62608 + -fstack-check argument to GCC, causing the variable-sized stack-based
62609 + allocator, alloca(), to not probe the stack on allocation. This
62610 + permits an unbounded alloca() to skip over any guard page and potentially
62611 + modify another thread's stack reliably. An enforced random gap
62612 + reduces the reliability of such an attack and increases the chance
62613 + that such a read/write to another thread's stack instead lands in
62614 + an unmapped area, causing a crash and triggering grsecurity's
62615 + anti-bruteforcing logic.
62616 +
62617 +config GRKERNSEC_PROC_MEMMAP
62618 + bool "Harden ASLR against information leaks and entropy reduction"
62619 + default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
62620 + depends on PAX_NOEXEC || PAX_ASLR
62621 + help
62622 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
62623 + give no information about the addresses of its mappings if
62624 + PaX features that rely on random addresses are enabled on the task.
62625 + In addition to sanitizing this information and disabling other
62626 + dangerous sources of information, this option causes reads of sensitive
62627 + /proc/<pid> entries where the file descriptor was opened in a different
62628 + task than the one performing the read. Such attempts are logged.
62629 + This option also limits argv/env strings for suid/sgid binaries
62630 + to 512KB to prevent a complete exhaustion of the stack entropy provided
62631 + by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
62632 + binaries to prevent alternative mmap layouts from being abused.
62633 +
62634 + If you use PaX it is essential that you say Y here as it closes up
62635 + several holes that make full ASLR useless locally.
62636 +
62637 +config GRKERNSEC_BRUTE
62638 + bool "Deter exploit bruteforcing"
62639 + default y if GRKERNSEC_CONFIG_AUTO
62640 + help
62641 + If you say Y here, attempts to bruteforce exploits against forking
62642 + daemons such as apache or sshd, as well as against suid/sgid binaries
62643 + will be deterred. When a child of a forking daemon is killed by PaX
62644 + or crashes due to an illegal instruction or other suspicious signal,
62645 + the parent process will be delayed 30 seconds upon every subsequent
62646 + fork until the administrator is able to assess the situation and
62647 + restart the daemon.
62648 + In the suid/sgid case, the attempt is logged, the user has all their
62649 + existing instances of the suid/sgid binary terminated and will
62650 + be unable to execute any suid/sgid binaries for 15 minutes.
62651 +
62652 + It is recommended that you also enable signal logging in the auditing
62653 + section so that logs are generated when a process triggers a suspicious
62654 + signal.
62655 + If the sysctl option is enabled, a sysctl option with name
62656 + "deter_bruteforce" is created.
62657 +
62658 +
62659 +config GRKERNSEC_MODHARDEN
62660 + bool "Harden module auto-loading"
62661 + default y if GRKERNSEC_CONFIG_AUTO
62662 + depends on MODULES
62663 + help
62664 + If you say Y here, module auto-loading in response to use of some
62665 + feature implemented by an unloaded module will be restricted to
62666 + root users. Enabling this option helps defend against attacks
62667 + by unprivileged users who abuse the auto-loading behavior to
62668 + cause a vulnerable module to load that is then exploited.
62669 +
62670 + If this option prevents a legitimate use of auto-loading for a
62671 + non-root user, the administrator can execute modprobe manually
62672 + with the exact name of the module mentioned in the alert log.
62673 + Alternatively, the administrator can add the module to the list
62674 + of modules loaded at boot by modifying init scripts.
62675 +
62676 + Modification of init scripts will most likely be needed on
62677 + Ubuntu servers with encrypted home directory support enabled,
62678 + as the first non-root user logging in will cause the ecb(aes),
62679 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
62680 +
62681 +config GRKERNSEC_HIDESYM
62682 + bool "Hide kernel symbols"
62683 + default y if GRKERNSEC_CONFIG_AUTO
62684 + select PAX_USERCOPY_SLABS
62685 + help
62686 + If you say Y here, getting information on loaded modules, and
62687 + displaying all kernel symbols through a syscall will be restricted
62688 + to users with CAP_SYS_MODULE. For software compatibility reasons,
62689 + /proc/kallsyms will be restricted to the root user. The RBAC
62690 + system can hide that entry even from root.
62691 +
62692 + This option also prevents leaking of kernel addresses through
62693 + several /proc entries.
62694 +
62695 + Note that this option is only effective provided the following
62696 + conditions are met:
62697 + 1) The kernel using grsecurity is not precompiled by some distribution
62698 + 2) You have also enabled GRKERNSEC_DMESG
62699 + 3) You are using the RBAC system and hiding other files such as your
62700 + kernel image and System.map. Alternatively, enabling this option
62701 + causes the permissions on /boot, /lib/modules, and the kernel
62702 + source directory to change at compile time to prevent
62703 + reading by non-root users.
62704 + If the above conditions are met, this option will aid in providing a
62705 + useful protection against local kernel exploitation of overflows
62706 + and arbitrary read/write vulnerabilities.
62707 +
62708 + It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
62709 + in addition to this feature.
62710 +
62711 +config GRKERNSEC_KERN_LOCKOUT
62712 + bool "Active kernel exploit response"
62713 + default y if GRKERNSEC_CONFIG_AUTO
62714 + depends on X86 || ARM || PPC || SPARC
62715 + help
62716 + If you say Y here, when a PaX alert is triggered due to suspicious
62717 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
62718 + or an OOPS occurs due to bad memory accesses, instead of just
62719 + terminating the offending process (and potentially allowing
62720 + a subsequent exploit from the same user), we will take one of two
62721 + actions:
62722 + If the user was root, we will panic the system
62723 + If the user was non-root, we will log the attempt, terminate
62724 + all processes owned by the user, then prevent them from creating
62725 + any new processes until the system is restarted
62726 + This deters repeated kernel exploitation/bruteforcing attempts
62727 + and is useful for later forensics.
62728 +
62729 +config GRKERNSEC_OLD_ARM_USERLAND
62730 + bool "Old ARM userland compatibility"
62731 + depends on ARM && (CPU_V6 || CPU_V6K || CPU_V7)
62732 + help
62733 + If you say Y here, stubs of executable code to perform such operations
62734 + as "compare-exchange" will be placed at fixed locations in the ARM vector
62735 + table. This is unfortunately needed for old ARM userland meant to run
62736 + across a wide range of processors. Without this option enabled,
62737 + the get_tls and data memory barrier stubs will be emulated by the kernel,
62738 + which is enough for Linaro userlands or other userlands designed for v6
62739 + and newer ARM CPUs. It's recommended that you try without this option enabled
62740 + first, and only enable it if your userland does not boot (it will likely fail
62741 + at init time).
62742 +
62743 +endmenu
62744 +menu "Role Based Access Control Options"
62745 +depends on GRKERNSEC
62746 +
62747 +config GRKERNSEC_RBAC_DEBUG
62748 + bool
62749 +
62750 +config GRKERNSEC_NO_RBAC
62751 + bool "Disable RBAC system"
62752 + help
62753 + If you say Y here, the /dev/grsec device will be removed from the kernel,
62754 + preventing the RBAC system from being enabled. You should only say Y
62755 + here if you have no intention of using the RBAC system, so as to prevent
62756 + an attacker with root access from misusing the RBAC system to hide files
62757 + and processes when loadable module support and /dev/[k]mem have been
62758 + locked down.
62759 +
62760 +config GRKERNSEC_ACL_HIDEKERN
62761 + bool "Hide kernel processes"
62762 + help
62763 + If you say Y here, all kernel threads will be hidden to all
62764 + processes but those whose subject has the "view hidden processes"
62765 + flag.
62766 +
62767 +config GRKERNSEC_ACL_MAXTRIES
62768 + int "Maximum tries before password lockout"
62769 + default 3
62770 + help
62771 + This option enforces the maximum number of times a user can attempt
62772 + to authorize themselves with the grsecurity RBAC system before being
62773 + denied the ability to attempt authorization again for a specified time.
62774 + The lower the number, the harder it will be to brute-force a password.
62775 +
62776 +config GRKERNSEC_ACL_TIMEOUT
62777 + int "Time to wait after max password tries, in seconds"
62778 + default 30
62779 + help
62780 + This option specifies the time the user must wait after attempting to
62781 + authorize to the RBAC system with the maximum number of invalid
62782 + passwords. The higher the number, the harder it will be to brute-force
62783 + a password.
62784 +
62785 +endmenu
62786 +menu "Filesystem Protections"
62787 +depends on GRKERNSEC
62788 +
62789 +config GRKERNSEC_PROC
62790 + bool "Proc restrictions"
62791 + default y if GRKERNSEC_CONFIG_AUTO
62792 + help
62793 + If you say Y here, the permissions of the /proc filesystem
62794 + will be altered to enhance system security and privacy. You MUST
62795 + choose either a user only restriction or a user and group restriction.
62796 + Depending upon the option you choose, you can either restrict users to
62797 + see only the processes they themselves run, or choose a group that can
62798 + view all processes and files normally restricted to root if you choose
62799 + the "restrict to user only" option. NOTE: If you're running identd or
62800 + ntpd as a non-root user, you will have to run it as the group you
62801 + specify here.
62802 +
62803 +config GRKERNSEC_PROC_USER
62804 + bool "Restrict /proc to user only"
62805 + depends on GRKERNSEC_PROC
62806 + help
62807 + If you say Y here, non-root users will only be able to view their own
62808 + processes, and restricts them from viewing network-related information,
62809 + and viewing kernel symbol and module information.
62810 +
62811 +config GRKERNSEC_PROC_USERGROUP
62812 + bool "Allow special group"
62813 + default y if GRKERNSEC_CONFIG_AUTO
62814 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
62815 + help
62816 + If you say Y here, you will be able to select a group that will be
62817 + able to view all processes and network-related information. If you've
62818 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
62819 + remain hidden. This option is useful if you want to run identd as
62820 + a non-root user. The group you select may also be chosen at boot time
62821 + via "grsec_proc_gid=" on the kernel commandline.
62822 +
62823 +config GRKERNSEC_PROC_GID
62824 + int "GID for special group"
62825 + depends on GRKERNSEC_PROC_USERGROUP
62826 + default 1001
62827 +
62828 +config GRKERNSEC_PROC_ADD
62829 + bool "Additional restrictions"
62830 + default y if GRKERNSEC_CONFIG_AUTO
62831 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
62832 + help
62833 + If you say Y here, additional restrictions will be placed on
62834 + /proc that keep normal users from viewing device information and
62835 + slabinfo information that could be useful for exploits.
62836 +
62837 +config GRKERNSEC_LINK
62838 + bool "Linking restrictions"
62839 + default y if GRKERNSEC_CONFIG_AUTO
62840 + help
62841 + If you say Y here, /tmp race exploits will be prevented, since users
62842 + will no longer be able to follow symlinks owned by other users in
62843 + world-writable +t directories (e.g. /tmp), unless the owner of the
62844 + symlink is the owner of the directory. users will also not be
62845 + able to hardlink to files they do not own. If the sysctl option is
62846 + enabled, a sysctl option with name "linking_restrictions" is created.
62847 +
62848 +config GRKERNSEC_SYMLINKOWN
62849 + bool "Kernel-enforced SymlinksIfOwnerMatch"
62850 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
62851 + help
62852 + Apache's SymlinksIfOwnerMatch option has an inherent race condition
62853 + that prevents it from being used as a security feature. As Apache
62854 + verifies the symlink by performing a stat() against the target of
62855 + the symlink before it is followed, an attacker can setup a symlink
62856 + to point to a same-owned file, then replace the symlink with one
62857 + that targets another user's file just after Apache "validates" the
62858 + symlink -- a classic TOCTOU race. If you say Y here, a complete,
62859 + race-free replacement for Apache's "SymlinksIfOwnerMatch" option
62860 + will be in place for the group you specify. If the sysctl option
62861 + is enabled, a sysctl option with name "enforce_symlinksifowner" is
62862 + created.
62863 +
62864 +config GRKERNSEC_SYMLINKOWN_GID
62865 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
62866 + depends on GRKERNSEC_SYMLINKOWN
62867 + default 1006
62868 + help
62869 + Setting this GID determines what group kernel-enforced
62870 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
62871 + is enabled, a sysctl option with name "symlinkown_gid" is created.
62872 +
62873 +config GRKERNSEC_FIFO
62874 + bool "FIFO restrictions"
62875 + default y if GRKERNSEC_CONFIG_AUTO
62876 + help
62877 + If you say Y here, users will not be able to write to FIFOs they don't
62878 + own in world-writable +t directories (e.g. /tmp), unless the owner of
62879 + the FIFO is the same owner of the directory it's held in. If the sysctl
62880 + option is enabled, a sysctl option with name "fifo_restrictions" is
62881 + created.
62882 +
62883 +config GRKERNSEC_SYSFS_RESTRICT
62884 + bool "Sysfs/debugfs restriction"
62885 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
62886 + depends on SYSFS
62887 + help
62888 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
62889 + any filesystem normally mounted under it (e.g. debugfs) will be
62890 + mostly accessible only by root. These filesystems generally provide access
62891 + to hardware and debug information that isn't appropriate for unprivileged
62892 + users of the system. Sysfs and debugfs have also become a large source
62893 + of new vulnerabilities, ranging from infoleaks to local compromise.
62894 + There has been very little oversight with an eye toward security involved
62895 + in adding new exporters of information to these filesystems, so their
62896 + use is discouraged.
62897 + For reasons of compatibility, a few directories have been whitelisted
62898 + for access by non-root users:
62899 + /sys/fs/selinux
62900 + /sys/fs/fuse
62901 + /sys/devices/system/cpu
62902 +
62903 +config GRKERNSEC_ROFS
62904 + bool "Runtime read-only mount protection"
62905 + depends on SYSCTL
62906 + help
62907 + If you say Y here, a sysctl option with name "romount_protect" will
62908 + be created. By setting this option to 1 at runtime, filesystems
62909 + will be protected in the following ways:
62910 + * No new writable mounts will be allowed
62911 + * Existing read-only mounts won't be able to be remounted read/write
62912 + * Write operations will be denied on all block devices
62913 + This option acts independently of grsec_lock: once it is set to 1,
62914 + it cannot be turned off. Therefore, please be mindful of the resulting
62915 + behavior if this option is enabled in an init script on a read-only
62916 + filesystem. This feature is mainly intended for secure embedded systems.
62917 +
62918 +config GRKERNSEC_DEVICE_SIDECHANNEL
62919 + bool "Eliminate stat/notify-based device sidechannels"
62920 + default y if GRKERNSEC_CONFIG_AUTO
62921 + help
62922 + If you say Y here, timing analyses on block or character
62923 + devices like /dev/ptmx using stat or inotify/dnotify/fanotify
62924 + will be thwarted for unprivileged users. If a process without
62925 + CAP_MKNOD stats such a device, the last access and last modify times
62926 + will match the device's create time. No access or modify events
62927 + will be triggered through inotify/dnotify/fanotify for such devices.
62928 + This feature will prevent attacks that may at a minimum
62929 + allow an attacker to determine the administrator's password length.
62930 +
62931 +config GRKERNSEC_CHROOT
62932 + bool "Chroot jail restrictions"
62933 + default y if GRKERNSEC_CONFIG_AUTO
62934 + help
62935 + If you say Y here, you will be able to choose several options that will
62936 + make breaking out of a chrooted jail much more difficult. If you
62937 + encounter no software incompatibilities with the following options, it
62938 + is recommended that you enable each one.
62939 +
62940 +config GRKERNSEC_CHROOT_MOUNT
62941 + bool "Deny mounts"
62942 + default y if GRKERNSEC_CONFIG_AUTO
62943 + depends on GRKERNSEC_CHROOT
62944 + help
62945 + If you say Y here, processes inside a chroot will not be able to
62946 + mount or remount filesystems. If the sysctl option is enabled, a
62947 + sysctl option with name "chroot_deny_mount" is created.
62948 +
62949 +config GRKERNSEC_CHROOT_DOUBLE
62950 + bool "Deny double-chroots"
62951 + default y if GRKERNSEC_CONFIG_AUTO
62952 + depends on GRKERNSEC_CHROOT
62953 + help
62954 + If you say Y here, processes inside a chroot will not be able to chroot
62955 + again outside the chroot. This is a widely used method of breaking
62956 + out of a chroot jail and should not be allowed. If the sysctl
62957 + option is enabled, a sysctl option with name
62958 + "chroot_deny_chroot" is created.
62959 +
62960 +config GRKERNSEC_CHROOT_PIVOT
62961 + bool "Deny pivot_root in chroot"
62962 + default y if GRKERNSEC_CONFIG_AUTO
62963 + depends on GRKERNSEC_CHROOT
62964 + help
62965 + If you say Y here, processes inside a chroot will not be able to use
62966 + a function called pivot_root() that was introduced in Linux 2.3.41. It
62967 + works similar to chroot in that it changes the root filesystem. This
62968 + function could be misused in a chrooted process to attempt to break out
62969 + of the chroot, and therefore should not be allowed. If the sysctl
62970 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
62971 + created.
62972 +
62973 +config GRKERNSEC_CHROOT_CHDIR
62974 + bool "Enforce chdir(\"/\") on all chroots"
62975 + default y if GRKERNSEC_CONFIG_AUTO
62976 + depends on GRKERNSEC_CHROOT
62977 + help
62978 + If you say Y here, the current working directory of all newly-chrooted
62979 + applications will be set to the root directory of the chroot.
62980 + The man page on chroot(2) states:
62981 + Note that this call does not change the current working
62982 + directory, so that `.' can be outside the tree rooted at
62983 + `/'. In particular, the super-user can escape from a
62984 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
62985 +
62986 + It is recommended that you say Y here, since it's not known to break
62987 + any software. If the sysctl option is enabled, a sysctl option with
62988 + name "chroot_enforce_chdir" is created.
62989 +
62990 +config GRKERNSEC_CHROOT_CHMOD
62991 + bool "Deny (f)chmod +s"
62992 + default y if GRKERNSEC_CONFIG_AUTO
62993 + depends on GRKERNSEC_CHROOT
62994 + help
62995 + If you say Y here, processes inside a chroot will not be able to chmod
62996 + or fchmod files to make them have suid or sgid bits. This protects
62997 + against another published method of breaking a chroot. If the sysctl
62998 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
62999 + created.
63000 +
63001 +config GRKERNSEC_CHROOT_FCHDIR
63002 + bool "Deny fchdir out of chroot"
63003 + default y if GRKERNSEC_CONFIG_AUTO
63004 + depends on GRKERNSEC_CHROOT
63005 + help
63006 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
63007 + to a file descriptor of the chrooting process that points to a directory
63008 + outside the filesystem will be stopped. If the sysctl option
63009 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
63010 +
63011 +config GRKERNSEC_CHROOT_MKNOD
63012 + bool "Deny mknod"
63013 + default y if GRKERNSEC_CONFIG_AUTO
63014 + depends on GRKERNSEC_CHROOT
63015 + help
63016 + If you say Y here, processes inside a chroot will not be allowed to
63017 + mknod. The problem with using mknod inside a chroot is that it
63018 + would allow an attacker to create a device entry that is the same
63019 + as one on the physical root of your system, which could range from
63020 + anything from the console device to a device for your harddrive (which
63021 + they could then use to wipe the drive or steal data). It is recommended
63022 + that you say Y here, unless you run into software incompatibilities.
63023 + If the sysctl option is enabled, a sysctl option with name
63024 + "chroot_deny_mknod" is created.
63025 +
63026 +config GRKERNSEC_CHROOT_SHMAT
63027 + bool "Deny shmat() out of chroot"
63028 + default y if GRKERNSEC_CONFIG_AUTO
63029 + depends on GRKERNSEC_CHROOT
63030 + help
63031 + If you say Y here, processes inside a chroot will not be able to attach
63032 + to shared memory segments that were created outside of the chroot jail.
63033 + It is recommended that you say Y here. If the sysctl option is enabled,
63034 + a sysctl option with name "chroot_deny_shmat" is created.
63035 +
63036 +config GRKERNSEC_CHROOT_UNIX
63037 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
63038 + default y if GRKERNSEC_CONFIG_AUTO
63039 + depends on GRKERNSEC_CHROOT
63040 + help
63041 + If you say Y here, processes inside a chroot will not be able to
63042 + connect to abstract (meaning not belonging to a filesystem) Unix
63043 + domain sockets that were bound outside of a chroot. It is recommended
63044 + that you say Y here. If the sysctl option is enabled, a sysctl option
63045 + with name "chroot_deny_unix" is created.
63046 +
63047 +config GRKERNSEC_CHROOT_FINDTASK
63048 + bool "Protect outside processes"
63049 + default y if GRKERNSEC_CONFIG_AUTO
63050 + depends on GRKERNSEC_CHROOT
63051 + help
63052 + If you say Y here, processes inside a chroot will not be able to
63053 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
63054 + getsid, or view any process outside of the chroot. If the sysctl
63055 + option is enabled, a sysctl option with name "chroot_findtask" is
63056 + created.
63057 +
63058 +config GRKERNSEC_CHROOT_NICE
63059 + bool "Restrict priority changes"
63060 + default y if GRKERNSEC_CONFIG_AUTO
63061 + depends on GRKERNSEC_CHROOT
63062 + help
63063 + If you say Y here, processes inside a chroot will not be able to raise
63064 + the priority of processes in the chroot, or alter the priority of
63065 + processes outside the chroot. This provides more security than simply
63066 + removing CAP_SYS_NICE from the process' capability set. If the
63067 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
63068 + is created.
63069 +
63070 +config GRKERNSEC_CHROOT_SYSCTL
63071 + bool "Deny sysctl writes"
63072 + default y if GRKERNSEC_CONFIG_AUTO
63073 + depends on GRKERNSEC_CHROOT
63074 + help
63075 + If you say Y here, an attacker in a chroot will not be able to
63076 + write to sysctl entries, either by sysctl(2) or through a /proc
63077 + interface. It is strongly recommended that you say Y here. If the
63078 + sysctl option is enabled, a sysctl option with name
63079 + "chroot_deny_sysctl" is created.
63080 +
63081 +config GRKERNSEC_CHROOT_CAPS
63082 + bool "Capability restrictions"
63083 + default y if GRKERNSEC_CONFIG_AUTO
63084 + depends on GRKERNSEC_CHROOT
63085 + help
63086 + If you say Y here, the capabilities on all processes within a
63087 + chroot jail will be lowered to stop module insertion, raw i/o,
63088 + system and net admin tasks, rebooting the system, modifying immutable
63089 + files, modifying IPC owned by another, and changing the system time.
63090 + This is left an option because it can break some apps. Disable this
63091 + if your chrooted apps are having problems performing those kinds of
63092 + tasks. If the sysctl option is enabled, a sysctl option with
63093 + name "chroot_caps" is created.
63094 +
63095 +config GRKERNSEC_CHROOT_INITRD
63096 + bool "Exempt initrd tasks from restrictions"
63097 + default y if GRKERNSEC_CONFIG_AUTO
63098 + depends on GRKERNSEC_CHROOT && BLK_DEV_INITRD
63099 + help
63100 + If you say Y here, tasks started prior to init will be exempted from
63101 + grsecurity's chroot restrictions. This option is mainly meant to
63102 + resolve Plymouth's performing privileged operations unnecessarily
63103 + in a chroot.
63104 +
63105 +endmenu
63106 +menu "Kernel Auditing"
63107 +depends on GRKERNSEC
63108 +
63109 +config GRKERNSEC_AUDIT_GROUP
63110 + bool "Single group for auditing"
63111 + help
63112 + If you say Y here, the exec and chdir logging features will only operate
63113 + on a group you specify. This option is recommended if you only want to
63114 + watch certain users instead of having a large amount of logs from the
63115 + entire system. If the sysctl option is enabled, a sysctl option with
63116 + name "audit_group" is created.
63117 +
63118 +config GRKERNSEC_AUDIT_GID
63119 + int "GID for auditing"
63120 + depends on GRKERNSEC_AUDIT_GROUP
63121 + default 1007
63122 +
63123 +config GRKERNSEC_EXECLOG
63124 + bool "Exec logging"
63125 + help
63126 + If you say Y here, all execve() calls will be logged (since the
63127 + other exec*() calls are frontends to execve(), all execution
63128 + will be logged). Useful for shell-servers that like to keep track
63129 + of their users. If the sysctl option is enabled, a sysctl option with
63130 + name "exec_logging" is created.
63131 + WARNING: This option when enabled will produce a LOT of logs, especially
63132 + on an active system.
63133 +
63134 +config GRKERNSEC_RESLOG
63135 + bool "Resource logging"
63136 + default y if GRKERNSEC_CONFIG_AUTO
63137 + help
63138 + If you say Y here, all attempts to overstep resource limits will
63139 + be logged with the resource name, the requested size, and the current
63140 + limit. It is highly recommended that you say Y here. If the sysctl
63141 + option is enabled, a sysctl option with name "resource_logging" is
63142 + created. If the RBAC system is enabled, the sysctl value is ignored.
63143 +
63144 +config GRKERNSEC_CHROOT_EXECLOG
63145 + bool "Log execs within chroot"
63146 + help
63147 + If you say Y here, all executions inside a chroot jail will be logged
63148 + to syslog. This can cause a large amount of logs if certain
63149 + applications (eg. djb's daemontools) are installed on the system, and
63150 + is therefore left as an option. If the sysctl option is enabled, a
63151 + sysctl option with name "chroot_execlog" is created.
63152 +
63153 +config GRKERNSEC_AUDIT_PTRACE
63154 + bool "Ptrace logging"
63155 + help
63156 + If you say Y here, all attempts to attach to a process via ptrace
63157 + will be logged. If the sysctl option is enabled, a sysctl option
63158 + with name "audit_ptrace" is created.
63159 +
63160 +config GRKERNSEC_AUDIT_CHDIR
63161 + bool "Chdir logging"
63162 + help
63163 + If you say Y here, all chdir() calls will be logged. If the sysctl
63164 + option is enabled, a sysctl option with name "audit_chdir" is created.
63165 +
63166 +config GRKERNSEC_AUDIT_MOUNT
63167 + bool "(Un)Mount logging"
63168 + help
63169 + If you say Y here, all mounts and unmounts will be logged. If the
63170 + sysctl option is enabled, a sysctl option with name "audit_mount" is
63171 + created.
63172 +
63173 +config GRKERNSEC_SIGNAL
63174 + bool "Signal logging"
63175 + default y if GRKERNSEC_CONFIG_AUTO
63176 + help
63177 + If you say Y here, certain important signals will be logged, such as
63178 + SIGSEGV, which will as a result inform you of when an error in a program
63179 + occurred, which in some cases could mean a possible exploit attempt.
63180 + If the sysctl option is enabled, a sysctl option with name
63181 + "signal_logging" is created.
63182 +
63183 +config GRKERNSEC_FORKFAIL
63184 + bool "Fork failure logging"
63185 + help
63186 + If you say Y here, all failed fork() attempts will be logged.
63187 + This could suggest a fork bomb, or someone attempting to overstep
63188 + their process limit. If the sysctl option is enabled, a sysctl option
63189 + with name "forkfail_logging" is created.
63190 +
63191 +config GRKERNSEC_TIME
63192 + bool "Time change logging"
63193 + default y if GRKERNSEC_CONFIG_AUTO
63194 + help
63195 + If you say Y here, any changes of the system clock will be logged.
63196 + If the sysctl option is enabled, a sysctl option with name
63197 + "timechange_logging" is created.
63198 +
63199 +config GRKERNSEC_PROC_IPADDR
63200 + bool "/proc/<pid>/ipaddr support"
63201 + default y if GRKERNSEC_CONFIG_AUTO
63202 + help
63203 + If you say Y here, a new entry will be added to each /proc/<pid>
63204 + directory that contains the IP address of the person using the task.
63205 + The IP is carried across local TCP and AF_UNIX stream sockets.
63206 + This information can be useful for IDS/IPSes to perform remote response
63207 + to a local attack. The entry is readable by only the owner of the
63208 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
63209 + the RBAC system), and thus does not create privacy concerns.
63210 +
63211 +config GRKERNSEC_RWXMAP_LOG
63212 + bool 'Denied RWX mmap/mprotect logging'
63213 + default y if GRKERNSEC_CONFIG_AUTO
63214 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
63215 + help
63216 + If you say Y here, calls to mmap() and mprotect() with explicit
63217 + usage of PROT_WRITE and PROT_EXEC together will be logged when
63218 + denied by the PAX_MPROTECT feature. This feature will also
63219 + log other problematic scenarios that can occur when PAX_MPROTECT
63220 + is enabled on a binary, like textrels and PT_GNU_STACK. If the
63221 + sysctl option is enabled, a sysctl option with name "rwxmap_logging"
63222 + is created.
63223 +
63224 +endmenu
63225 +
63226 +menu "Executable Protections"
63227 +depends on GRKERNSEC
63228 +
63229 +config GRKERNSEC_DMESG
63230 + bool "Dmesg(8) restriction"
63231 + default y if GRKERNSEC_CONFIG_AUTO
63232 + help
63233 + If you say Y here, non-root users will not be able to use dmesg(8)
63234 + to view the contents of the kernel's circular log buffer.
63235 + The kernel's log buffer often contains kernel addresses and other
63236 + identifying information useful to an attacker in fingerprinting a
63237 + system for a targeted exploit.
63238 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
63239 + created.
63240 +
63241 +config GRKERNSEC_HARDEN_PTRACE
63242 + bool "Deter ptrace-based process snooping"
63243 + default y if GRKERNSEC_CONFIG_AUTO
63244 + help
63245 + If you say Y here, TTY sniffers and other malicious monitoring
63246 + programs implemented through ptrace will be defeated. If you
63247 + have been using the RBAC system, this option has already been
63248 + enabled for several years for all users, with the ability to make
63249 + fine-grained exceptions.
63250 +
63251 + This option only affects the ability of non-root users to ptrace
63252 + processes that are not a descendant of the ptracing process.
63253 + This means that strace ./binary and gdb ./binary will still work,
63254 + but attaching to arbitrary processes will not. If the sysctl
63255 + option is enabled, a sysctl option with name "harden_ptrace" is
63256 + created.
63257 +
63258 +config GRKERNSEC_PTRACE_READEXEC
63259 + bool "Require read access to ptrace sensitive binaries"
63260 + default y if GRKERNSEC_CONFIG_AUTO
63261 + help
63262 + If you say Y here, unprivileged users will not be able to ptrace unreadable
63263 + binaries. This option is useful in environments that
63264 + remove the read bits (e.g. file mode 4711) from suid binaries to
63265 + prevent infoleaking of their contents. This option adds
63266 + consistency to the use of that file mode, as the binary could normally
63267 + be read out when run without privileges while ptracing.
63268 +
63269 + If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
63270 + is created.
63271 +
63272 +config GRKERNSEC_SETXID
63273 + bool "Enforce consistent multithreaded privileges"
63274 + default y if GRKERNSEC_CONFIG_AUTO
63275 + depends on (X86 || SPARC64 || PPC || ARM || MIPS)
63276 + help
63277 + If you say Y here, a change from a root uid to a non-root uid
63278 + in a multithreaded application will cause the resulting uids,
63279 + gids, supplementary groups, and capabilities in that thread
63280 + to be propagated to the other threads of the process. In most
63281 + cases this is unnecessary, as glibc will emulate this behavior
63282 + on behalf of the application. Other libcs do not act in the
63283 + same way, allowing the other threads of the process to continue
63284 + running with root privileges. If the sysctl option is enabled,
63285 + a sysctl option with name "consistent_setxid" is created.
63286 +
63287 +config GRKERNSEC_HARDEN_IPC
63288 + bool "Disallow access to world-accessible IPC objects"
63289 + default y if GRKERNSEC_CONFIG_AUTO
63290 + depends on SYSVIPC
63291 + help
63292 + If you say Y here, access to overly-permissive IPC (shared memory,
63293 + message queues, and semaphores) will be denied for processes whose
63294 + effective user or group would not grant them permission. It's a
63295 + common error to grant too much permission to these objects, with
63296 + impact ranging from denial of service and information leaking to
63297 + privilege escalation. This feature was developed in response to
63298 + research by Tim Brown:
63299 + http://labs.portcullis.co.uk/whitepapers/memory-squatting-attacks-on-system-v-shared-memory/
63300 + who found hundreds of such insecure usages. Processes with
63301 + CAP_IPC_OWNER are still permitted to access these IPC objects.
63302 + If the sysctl option is enabled, a sysctl option with name
63303 + "harden_ipc" is created.
63304 +
63305 +config GRKERNSEC_TPE
63306 + bool "Trusted Path Execution (TPE)"
63307 + default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
63308 + help
63309 + If you say Y here, you will be able to choose a gid to add to the
63310 + supplementary groups of users you want to mark as "untrusted."
63311 + These users will not be able to execute any files that are not in
63312 + root-owned directories writable only by root. If the sysctl option
63313 + is enabled, a sysctl option with name "tpe" is created.
63314 +
63315 +config GRKERNSEC_TPE_ALL
63316 + bool "Partially restrict all non-root users"
63317 + depends on GRKERNSEC_TPE
63318 + help
63319 + If you say Y here, all non-root users will be covered under
63320 + a weaker TPE restriction. This is separate from, and in addition to,
63321 + the main TPE options that you have selected elsewhere. Thus, if a
63322 + "trusted" GID is chosen, this restriction applies to even that GID.
63323 + Under this restriction, all non-root users will only be allowed to
63324 + execute files in directories they own that are not group or
63325 + world-writable, or in directories owned by root and writable only by
63326 + root. If the sysctl option is enabled, a sysctl option with name
63327 + "tpe_restrict_all" is created.
63328 +
63329 +config GRKERNSEC_TPE_INVERT
63330 + bool "Invert GID option"
63331 + depends on GRKERNSEC_TPE
63332 + help
63333 + If you say Y here, the group you specify in the TPE configuration will
63334 + decide what group TPE restrictions will be *disabled* for. This
63335 + option is useful if you want TPE restrictions to be applied to most
63336 + users on the system. If the sysctl option is enabled, a sysctl option
63337 + with name "tpe_invert" is created. Unlike other sysctl options, this
63338 + entry will default to on for backward-compatibility.
63339 +
63340 +config GRKERNSEC_TPE_GID
63341 + int
63342 + default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
63343 + default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
63344 +
63345 +config GRKERNSEC_TPE_UNTRUSTED_GID
63346 + int "GID for TPE-untrusted users"
63347 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
63348 + default 1005
63349 + help
63350 + Setting this GID determines what group TPE restrictions will be
63351 + *enabled* for. If the sysctl option is enabled, a sysctl option
63352 + with name "tpe_gid" is created.
63353 +
63354 +config GRKERNSEC_TPE_TRUSTED_GID
63355 + int "GID for TPE-trusted users"
63356 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
63357 + default 1005
63358 + help
63359 + Setting this GID determines what group TPE restrictions will be
63360 + *disabled* for. If the sysctl option is enabled, a sysctl option
63361 + with name "tpe_gid" is created.
63362 +
63363 +endmenu
63364 +menu "Network Protections"
63365 +depends on GRKERNSEC
63366 +
63367 +config GRKERNSEC_RANDNET
63368 + bool "Larger entropy pools"
63369 + default y if GRKERNSEC_CONFIG_AUTO
63370 + help
63371 + If you say Y here, the entropy pools used for many features of Linux
63372 + and grsecurity will be doubled in size. Since several grsecurity
63373 + features use additional randomness, it is recommended that you say Y
63374 + here. Saying Y here has a similar effect as modifying
63375 + /proc/sys/kernel/random/poolsize.
63376 +
63377 +config GRKERNSEC_BLACKHOLE
63378 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
63379 + default y if GRKERNSEC_CONFIG_AUTO
63380 + depends on NET
63381 + help
63382 + If you say Y here, neither TCP resets nor ICMP
63383 + destination-unreachable packets will be sent in response to packets
63384 + sent to ports for which no associated listening process exists.
63385 + This feature supports both IPV4 and IPV6 and exempts the
63386 + loopback interface from blackholing. Enabling this feature
63387 + makes a host more resilient to DoS attacks and reduces network
63388 + visibility against scanners.
63389 +
63390 + The blackhole feature as-implemented is equivalent to the FreeBSD
63391 + blackhole feature, as it prevents RST responses to all packets, not
63392 + just SYNs. Under most application behavior this causes no
63393 + problems, but applications (like haproxy) may not close certain
63394 + connections in a way that cleanly terminates them on the remote
63395 + end, leaving the remote host in LAST_ACK state. Because of this
63396 + side-effect and to prevent intentional LAST_ACK DoSes, this
63397 + feature also adds automatic mitigation against such attacks.
63398 + The mitigation drastically reduces the amount of time a socket
63399 + can spend in LAST_ACK state. If you're using haproxy and not
63400 + all servers it connects to have this option enabled, consider
63401 + disabling this feature on the haproxy host.
63402 +
63403 + If the sysctl option is enabled, two sysctl options with names
63404 + "ip_blackhole" and "lastack_retries" will be created.
63405 + While "ip_blackhole" takes the standard zero/non-zero on/off
63406 + toggle, "lastack_retries" uses the same kinds of values as
63407 + "tcp_retries1" and "tcp_retries2". The default value of 4
63408 + prevents a socket from lasting more than 45 seconds in LAST_ACK
63409 + state.
63410 +
63411 +config GRKERNSEC_NO_SIMULT_CONNECT
63412 + bool "Disable TCP Simultaneous Connect"
63413 + default y if GRKERNSEC_CONFIG_AUTO
63414 + depends on NET
63415 + help
63416 + If you say Y here, a feature by Willy Tarreau will be enabled that
63417 + removes a weakness in Linux's strict implementation of TCP that
63418 + allows two clients to connect to each other without either entering
63419 + a listening state. The weakness allows an attacker to easily prevent
63420 + a client from connecting to a known server provided the source port
63421 + for the connection is guessed correctly.
63422 +
63423 + As the weakness could be used to prevent an antivirus or IPS from
63424 + fetching updates, or prevent an SSL gateway from fetching a CRL,
63425 + it should be eliminated by enabling this option. Though Linux is
63426 + one of few operating systems supporting simultaneous connect, it
63427 + has no legitimate use in practice and is rarely supported by firewalls.
63428 +
63429 +config GRKERNSEC_SOCKET
63430 + bool "Socket restrictions"
63431 + depends on NET
63432 + help
63433 + If you say Y here, you will be able to choose from several options.
63434 + If you assign a GID on your system and add it to the supplementary
63435 + groups of users you want to restrict socket access to, this patch
63436 + will perform up to three things, based on the option(s) you choose.
63437 +
63438 +config GRKERNSEC_SOCKET_ALL
63439 + bool "Deny any sockets to group"
63440 + depends on GRKERNSEC_SOCKET
63441 + help
63442 + If you say Y here, you will be able to choose a GID of whose users will
63443 + be unable to connect to other hosts from your machine or run server
63444 + applications from your machine. If the sysctl option is enabled, a
63445 + sysctl option with name "socket_all" is created.
63446 +
63447 +config GRKERNSEC_SOCKET_ALL_GID
63448 + int "GID to deny all sockets for"
63449 + depends on GRKERNSEC_SOCKET_ALL
63450 + default 1004
63451 + help
63452 + Here you can choose the GID to disable socket access for. Remember to
63453 + add the users you want socket access disabled for to the GID
63454 + specified here. If the sysctl option is enabled, a sysctl option
63455 + with name "socket_all_gid" is created.
63456 +
63457 +config GRKERNSEC_SOCKET_CLIENT
63458 + bool "Deny client sockets to group"
63459 + depends on GRKERNSEC_SOCKET
63460 + help
63461 + If you say Y here, you will be able to choose a GID of whose users will
63462 + be unable to connect to other hosts from your machine, but will be
63463 + able to run servers. If this option is enabled, all users in the group
63464 + you specify will have to use passive mode when initiating ftp transfers
63465 + from the shell on your machine. If the sysctl option is enabled, a
63466 + sysctl option with name "socket_client" is created.
63467 +
63468 +config GRKERNSEC_SOCKET_CLIENT_GID
63469 + int "GID to deny client sockets for"
63470 + depends on GRKERNSEC_SOCKET_CLIENT
63471 + default 1003
63472 + help
63473 + Here you can choose the GID to disable client socket access for.
63474 + Remember to add the users you want client socket access disabled for to
63475 + the GID specified here. If the sysctl option is enabled, a sysctl
63476 + option with name "socket_client_gid" is created.
63477 +
63478 +config GRKERNSEC_SOCKET_SERVER
63479 + bool "Deny server sockets to group"
63480 + depends on GRKERNSEC_SOCKET
63481 + help
63482 + If you say Y here, you will be able to choose a GID of whose users will
63483 + be unable to run server applications from your machine. If the sysctl
63484 + option is enabled, a sysctl option with name "socket_server" is created.
63485 +
63486 +config GRKERNSEC_SOCKET_SERVER_GID
63487 + int "GID to deny server sockets for"
63488 + depends on GRKERNSEC_SOCKET_SERVER
63489 + default 1002
63490 + help
63491 + Here you can choose the GID to disable server socket access for.
63492 + Remember to add the users you want server socket access disabled for to
63493 + the GID specified here. If the sysctl option is enabled, a sysctl
63494 + option with name "socket_server_gid" is created.
63495 +
63496 +endmenu
63497 +
63498 +menu "Physical Protections"
63499 +depends on GRKERNSEC
63500 +
63501 +config GRKERNSEC_DENYUSB
63502 + bool "Deny new USB connections after toggle"
63503 + default y if GRKERNSEC_CONFIG_AUTO
63504 + depends on SYSCTL && USB_SUPPORT
63505 + help
63506 + If you say Y here, a new sysctl option with name "deny_new_usb"
63507 + will be created. Setting its value to 1 will prevent any new
63508 + USB devices from being recognized by the OS. Any attempted USB
63509 + device insertion will be logged. This option is intended to be
63510 + used against custom USB devices designed to exploit vulnerabilities
63511 + in various USB device drivers.
63512 +
63513 + For greatest effectiveness, this sysctl should be set after any
63514 + relevant init scripts. This option is safe to enable in distros
63515 + as each user can choose whether or not to toggle the sysctl.
63516 +
63517 +config GRKERNSEC_DENYUSB_FORCE
63518 + bool "Reject all USB devices not connected at boot"
63519 + select USB
63520 + depends on GRKERNSEC_DENYUSB
63521 + help
63522 + If you say Y here, a variant of GRKERNSEC_DENYUSB will be enabled
63523 + that doesn't involve a sysctl entry. This option should only be
63524 + enabled if you're sure you want to deny all new USB connections
63525 + at runtime and don't want to modify init scripts. This should not
63526 + be enabled by distros. It forces the core USB code to be built
63527 + into the kernel image so that all devices connected at boot time
63528 + can be recognized and new USB device connections can be prevented
63529 + prior to init running.
63530 +
63531 +endmenu
63532 +
63533 +menu "Sysctl Support"
63534 +depends on GRKERNSEC && SYSCTL
63535 +
63536 +config GRKERNSEC_SYSCTL
63537 + bool "Sysctl support"
63538 + default y if GRKERNSEC_CONFIG_AUTO
63539 + help
63540 + If you say Y here, you will be able to change the options that
63541 + grsecurity runs with at bootup, without having to recompile your
63542 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
63543 + to enable (1) or disable (0) various features. All the sysctl entries
63544 + are mutable until the "grsec_lock" entry is set to a non-zero value.
63545 + All features enabled in the kernel configuration are disabled at boot
63546 + if you do not say Y to the "Turn on features by default" option.
63547 + All options should be set at startup, and the grsec_lock entry should
63548 + be set to a non-zero value after all the options are set.
63549 + *THIS IS EXTREMELY IMPORTANT*
63550 +
63551 +config GRKERNSEC_SYSCTL_DISTRO
63552 + bool "Extra sysctl support for distro makers (READ HELP)"
63553 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
63554 + help
63555 + If you say Y here, additional sysctl options will be created
63556 + for features that affect processes running as root. Therefore,
63557 + it is critical when using this option that the grsec_lock entry be
63558 + enabled after boot. Only distros with prebuilt kernel packages
63559 + with this option enabled that can ensure grsec_lock is enabled
63560 + after boot should use this option.
63561 + *Failure to set grsec_lock after boot makes all grsec features
63562 + this option covers useless*
63563 +
63564 + Currently this option creates the following sysctl entries:
63565 + "Disable Privileged I/O": "disable_priv_io"
63566 +
63567 +config GRKERNSEC_SYSCTL_ON
63568 + bool "Turn on features by default"
63569 + default y if GRKERNSEC_CONFIG_AUTO
63570 + depends on GRKERNSEC_SYSCTL
63571 + help
63572 + If you say Y here, instead of having all features enabled in the
63573 + kernel configuration disabled at boot time, the features will be
63574 + enabled at boot time. It is recommended you say Y here unless
63575 + there is some reason you would want all sysctl-tunable features to
63576 + be disabled by default. As mentioned elsewhere, it is important
63577 + to enable the grsec_lock entry once you have finished modifying
63578 + the sysctl entries.
63579 +
63580 +endmenu
63581 +menu "Logging Options"
63582 +depends on GRKERNSEC
63583 +
63584 +config GRKERNSEC_FLOODTIME
63585 + int "Seconds in between log messages (minimum)"
63586 + default 10
63587 + help
63588 + This option allows you to enforce the number of seconds between
63589 + grsecurity log messages. The default should be suitable for most
63590 + people, however, if you choose to change it, choose a value small enough
63591 + to allow informative logs to be produced, but large enough to
63592 + prevent flooding.
63593 +
63594 +config GRKERNSEC_FLOODBURST
63595 + int "Number of messages in a burst (maximum)"
63596 + default 6
63597 + help
63598 + This option allows you to choose the maximum number of messages allowed
63599 + within the flood time interval you chose in a separate option. The
63600 + default should be suitable for most people, however if you find that
63601 + many of your logs are being interpreted as flooding, you may want to
63602 + raise this value.
63603 +
63604 +endmenu
63605 diff --git a/grsecurity/Makefile b/grsecurity/Makefile
63606 new file mode 100644
63607 index 0000000..85beb79
63608 --- /dev/null
63609 +++ b/grsecurity/Makefile
63610 @@ -0,0 +1,43 @@
63611 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
63612 +# during 2001-2009 it has been completely redesigned by Brad Spengler
63613 +# into an RBAC system
63614 +#
63615 +# All code in this directory and various hooks inserted throughout the kernel
63616 +# are copyright Brad Spengler - Open Source Security, Inc., and released
63617 +# under the GPL v2 or higher
63618 +
63619 +KBUILD_CFLAGS += -Werror
63620 +
63621 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
63622 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
63623 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o \
63624 + grsec_usb.o grsec_ipc.o
63625 +
63626 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
63627 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
63628 + gracl_learn.o grsec_log.o gracl_policy.o
63629 +ifdef CONFIG_COMPAT
63630 +obj-$(CONFIG_GRKERNSEC) += gracl_compat.o
63631 +endif
63632 +
63633 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
63634 +
63635 +ifdef CONFIG_NET
63636 +obj-y += grsec_sock.o
63637 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
63638 +endif
63639 +
63640 +ifndef CONFIG_GRKERNSEC
63641 +obj-y += grsec_disabled.o
63642 +endif
63643 +
63644 +ifdef CONFIG_GRKERNSEC_HIDESYM
63645 +extra-y := grsec_hidesym.o
63646 +$(obj)/grsec_hidesym.o:
63647 + @-chmod -f 500 /boot
63648 + @-chmod -f 500 /lib/modules
63649 + @-chmod -f 500 /lib64/modules
63650 + @-chmod -f 500 /lib32/modules
63651 + @-chmod -f 700 .
63652 + @echo ' grsec: protected kernel image paths'
63653 +endif
63654 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
63655 new file mode 100644
63656 index 0000000..6affeea
63657 --- /dev/null
63658 +++ b/grsecurity/gracl.c
63659 @@ -0,0 +1,2679 @@
63660 +#include <linux/kernel.h>
63661 +#include <linux/module.h>
63662 +#include <linux/sched.h>
63663 +#include <linux/mm.h>
63664 +#include <linux/file.h>
63665 +#include <linux/fs.h>
63666 +#include <linux/namei.h>
63667 +#include <linux/mount.h>
63668 +#include <linux/tty.h>
63669 +#include <linux/proc_fs.h>
63670 +#include <linux/lglock.h>
63671 +#include <linux/slab.h>
63672 +#include <linux/vmalloc.h>
63673 +#include <linux/types.h>
63674 +#include <linux/sysctl.h>
63675 +#include <linux/netdevice.h>
63676 +#include <linux/ptrace.h>
63677 +#include <linux/gracl.h>
63678 +#include <linux/gralloc.h>
63679 +#include <linux/security.h>
63680 +#include <linux/grinternal.h>
63681 +#include <linux/pid_namespace.h>
63682 +#include <linux/stop_machine.h>
63683 +#include <linux/fdtable.h>
63684 +#include <linux/percpu.h>
63685 +#include <linux/lglock.h>
63686 +#include <linux/hugetlb.h>
63687 +#include <linux/posix-timers.h>
63688 +#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
63689 +#include <linux/magic.h>
63690 +#include <linux/pagemap.h>
63691 +#include "../fs/btrfs/async-thread.h"
63692 +#include "../fs/btrfs/ctree.h"
63693 +#include "../fs/btrfs/btrfs_inode.h"
63694 +#endif
63695 +#include "../fs/mount.h"
63696 +
63697 +#include <asm/uaccess.h>
63698 +#include <asm/errno.h>
63699 +#include <asm/mman.h>
63700 +
63701 +#define FOR_EACH_ROLE_START(role) \
63702 + role = running_polstate.role_list; \
63703 + while (role) {
63704 +
63705 +#define FOR_EACH_ROLE_END(role) \
63706 + role = role->prev; \
63707 + }
63708 +
63709 +extern struct lglock vfsmount_lock;
63710 +
63711 +extern struct path gr_real_root;
63712 +
63713 +static struct gr_policy_state running_polstate;
63714 +struct gr_policy_state *polstate = &running_polstate;
63715 +extern struct gr_alloc_state *current_alloc_state;
63716 +
63717 +extern char *gr_shared_page[4];
63718 +DEFINE_RWLOCK(gr_inode_lock);
63719 +
63720 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
63721 +
63722 +#ifdef CONFIG_NET
63723 +extern struct vfsmount *sock_mnt;
63724 +#endif
63725 +
63726 +extern struct vfsmount *pipe_mnt;
63727 +extern struct vfsmount *shm_mnt;
63728 +
63729 +#ifdef CONFIG_HUGETLBFS
63730 +extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
63731 +#endif
63732 +
63733 +extern u16 acl_sp_role_value;
63734 +extern struct acl_object_label *fakefs_obj_rw;
63735 +extern struct acl_object_label *fakefs_obj_rwx;
63736 +
63737 +int gr_acl_is_enabled(void)
63738 +{
63739 + return (gr_status & GR_READY);
63740 +}
63741 +
63742 +void gr_enable_rbac_system(void)
63743 +{
63744 + pax_open_kernel();
63745 + gr_status |= GR_READY;
63746 + pax_close_kernel();
63747 +}
63748 +
63749 +int gr_rbac_disable(void *unused)
63750 +{
63751 + pax_open_kernel();
63752 + gr_status &= ~GR_READY;
63753 + pax_close_kernel();
63754 +
63755 + return 0;
63756 +}
63757 +
63758 +static inline dev_t __get_dev(const struct dentry *dentry)
63759 +{
63760 +#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
63761 + if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
63762 + return BTRFS_I(dentry->d_inode)->root->anon_dev;
63763 + else
63764 +#endif
63765 + return dentry->d_sb->s_dev;
63766 +}
63767 +
63768 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
63769 +{
63770 + return __get_dev(dentry);
63771 +}
63772 +
63773 +static char gr_task_roletype_to_char(struct task_struct *task)
63774 +{
63775 + switch (task->role->roletype &
63776 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
63777 + GR_ROLE_SPECIAL)) {
63778 + case GR_ROLE_DEFAULT:
63779 + return 'D';
63780 + case GR_ROLE_USER:
63781 + return 'U';
63782 + case GR_ROLE_GROUP:
63783 + return 'G';
63784 + case GR_ROLE_SPECIAL:
63785 + return 'S';
63786 + }
63787 +
63788 + return 'X';
63789 +}
63790 +
63791 +char gr_roletype_to_char(void)
63792 +{
63793 + return gr_task_roletype_to_char(current);
63794 +}
63795 +
63796 +__inline__ int
63797 +gr_acl_tpe_check(void)
63798 +{
63799 + if (unlikely(!(gr_status & GR_READY)))
63800 + return 0;
63801 + if (current->role->roletype & GR_ROLE_TPE)
63802 + return 1;
63803 + else
63804 + return 0;
63805 +}
63806 +
63807 +int
63808 +gr_handle_rawio(const struct inode *inode)
63809 +{
63810 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63811 + if (inode && S_ISBLK(inode->i_mode) &&
63812 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
63813 + !capable(CAP_SYS_RAWIO))
63814 + return 1;
63815 +#endif
63816 + return 0;
63817 +}
63818 +
63819 +int
63820 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
63821 +{
63822 + if (likely(lena != lenb))
63823 + return 0;
63824 +
63825 + return !memcmp(a, b, lena);
63826 +}
63827 +
63828 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
63829 +{
63830 + *buflen -= namelen;
63831 + if (*buflen < 0)
63832 + return -ENAMETOOLONG;
63833 + *buffer -= namelen;
63834 + memcpy(*buffer, str, namelen);
63835 + return 0;
63836 +}
63837 +
63838 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
63839 +{
63840 + return prepend(buffer, buflen, name->name, name->len);
63841 +}
63842 +
63843 +static int prepend_path(const struct path *path, struct path *root,
63844 + char **buffer, int *buflen)
63845 +{
63846 + struct dentry *dentry = path->dentry;
63847 + struct vfsmount *vfsmnt = path->mnt;
63848 + struct mount *mnt = real_mount(vfsmnt);
63849 + bool slash = false;
63850 + int error = 0;
63851 +
63852 + while (dentry != root->dentry || vfsmnt != root->mnt) {
63853 + struct dentry * parent;
63854 +
63855 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
63856 + /* Global root? */
63857 + if (!mnt_has_parent(mnt)) {
63858 + goto out;
63859 + }
63860 + dentry = mnt->mnt_mountpoint;
63861 + mnt = mnt->mnt_parent;
63862 + vfsmnt = &mnt->mnt;
63863 + continue;
63864 + }
63865 + parent = dentry->d_parent;
63866 + prefetch(parent);
63867 + spin_lock(&dentry->d_lock);
63868 + error = prepend_name(buffer, buflen, &dentry->d_name);
63869 + spin_unlock(&dentry->d_lock);
63870 + if (!error)
63871 + error = prepend(buffer, buflen, "/", 1);
63872 + if (error)
63873 + break;
63874 +
63875 + slash = true;
63876 + dentry = parent;
63877 + }
63878 +
63879 +out:
63880 + if (!error && !slash)
63881 + error = prepend(buffer, buflen, "/", 1);
63882 +
63883 + return error;
63884 +}
63885 +
63886 +/* this must be called with vfsmount_lock and rename_lock held */
63887 +
63888 +static char *__our_d_path(const struct path *path, struct path *root,
63889 + char *buf, int buflen)
63890 +{
63891 + char *res = buf + buflen;
63892 + int error;
63893 +
63894 + prepend(&res, &buflen, "\0", 1);
63895 + error = prepend_path(path, root, &res, &buflen);
63896 + if (error)
63897 + return ERR_PTR(error);
63898 +
63899 + return res;
63900 +}
63901 +
63902 +static char *
63903 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
63904 +{
63905 + char *retval;
63906 +
63907 + retval = __our_d_path(path, root, buf, buflen);
63908 + if (unlikely(IS_ERR(retval)))
63909 + retval = strcpy(buf, "<path too long>");
63910 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
63911 + retval[1] = '\0';
63912 +
63913 + return retval;
63914 +}
63915 +
63916 +static char *
63917 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
63918 + char *buf, int buflen)
63919 +{
63920 + struct path path;
63921 + char *res;
63922 +
63923 + path.dentry = (struct dentry *)dentry;
63924 + path.mnt = (struct vfsmount *)vfsmnt;
63925 +
63926 + /* we can use gr_real_root.dentry, gr_real_root.mnt, because this is only called
63927 + by the RBAC system */
63928 + res = gen_full_path(&path, &gr_real_root, buf, buflen);
63929 +
63930 + return res;
63931 +}
63932 +
63933 +static char *
63934 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
63935 + char *buf, int buflen)
63936 +{
63937 + char *res;
63938 + struct path path;
63939 + struct path root;
63940 + struct task_struct *reaper = init_pid_ns.child_reaper;
63941 +
63942 + path.dentry = (struct dentry *)dentry;
63943 + path.mnt = (struct vfsmount *)vfsmnt;
63944 +
63945 + /* we can't use gr_real_root.dentry, gr_real_root.mnt, because they belong only to the RBAC system */
63946 + get_fs_root(reaper->fs, &root);
63947 +
63948 + br_read_lock(&vfsmount_lock);
63949 + write_seqlock(&rename_lock);
63950 + res = gen_full_path(&path, &root, buf, buflen);
63951 + write_sequnlock(&rename_lock);
63952 + br_read_unlock(&vfsmount_lock);
63953 +
63954 + path_put(&root);
63955 + return res;
63956 +}
63957 +
63958 +char *
63959 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
63960 +{
63961 + char *ret;
63962 + br_read_lock(&vfsmount_lock);
63963 + write_seqlock(&rename_lock);
63964 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
63965 + PAGE_SIZE);
63966 + write_sequnlock(&rename_lock);
63967 + br_read_unlock(&vfsmount_lock);
63968 + return ret;
63969 +}
63970 +
63971 +static char *
63972 +gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
63973 +{
63974 + char *ret;
63975 + char *buf;
63976 + int buflen;
63977 +
63978 + br_read_lock(&vfsmount_lock);
63979 + write_seqlock(&rename_lock);
63980 + buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
63981 + ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
63982 + buflen = (int)(ret - buf);
63983 + if (buflen >= 5)
63984 + prepend(&ret, &buflen, "/proc", 5);
63985 + else
63986 + ret = strcpy(buf, "<path too long>");
63987 + write_sequnlock(&rename_lock);
63988 + br_read_unlock(&vfsmount_lock);
63989 + return ret;
63990 +}
63991 +
63992 +char *
63993 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
63994 +{
63995 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
63996 + PAGE_SIZE);
63997 +}
63998 +
63999 +char *
64000 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
64001 +{
64002 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
64003 + PAGE_SIZE);
64004 +}
64005 +
64006 +char *
64007 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
64008 +{
64009 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
64010 + PAGE_SIZE);
64011 +}
64012 +
64013 +char *
64014 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
64015 +{
64016 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
64017 + PAGE_SIZE);
64018 +}
64019 +
64020 +char *
64021 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
64022 +{
64023 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
64024 + PAGE_SIZE);
64025 +}
64026 +
64027 +__inline__ __u32
64028 +to_gr_audit(const __u32 reqmode)
64029 +{
64030 + /* masks off auditable permission flags, then shifts them to create
64031 + auditing flags, and adds the special case of append auditing if
64032 + we're requesting write */
64033 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
64034 +}
64035 +
64036 +struct acl_role_label *
64037 +__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid,
64038 + const gid_t gid)
64039 +{
64040 + unsigned int index = gr_rhash(uid, GR_ROLE_USER, state->acl_role_set.r_size);
64041 + struct acl_role_label *match;
64042 + struct role_allowed_ip *ipp;
64043 + unsigned int x;
64044 + u32 curr_ip = task->signal->saved_ip;
64045 +
64046 + match = state->acl_role_set.r_hash[index];
64047 +
64048 + while (match) {
64049 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
64050 + for (x = 0; x < match->domain_child_num; x++) {
64051 + if (match->domain_children[x] == uid)
64052 + goto found;
64053 + }
64054 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
64055 + break;
64056 + match = match->next;
64057 + }
64058 +found:
64059 + if (match == NULL) {
64060 + try_group:
64061 + index = gr_rhash(gid, GR_ROLE_GROUP, state->acl_role_set.r_size);
64062 + match = state->acl_role_set.r_hash[index];
64063 +
64064 + while (match) {
64065 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
64066 + for (x = 0; x < match->domain_child_num; x++) {
64067 + if (match->domain_children[x] == gid)
64068 + goto found2;
64069 + }
64070 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
64071 + break;
64072 + match = match->next;
64073 + }
64074 +found2:
64075 + if (match == NULL)
64076 + match = state->default_role;
64077 + if (match->allowed_ips == NULL)
64078 + return match;
64079 + else {
64080 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
64081 + if (likely
64082 + ((ntohl(curr_ip) & ipp->netmask) ==
64083 + (ntohl(ipp->addr) & ipp->netmask)))
64084 + return match;
64085 + }
64086 + match = state->default_role;
64087 + }
64088 + } else if (match->allowed_ips == NULL) {
64089 + return match;
64090 + } else {
64091 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
64092 + if (likely
64093 + ((ntohl(curr_ip) & ipp->netmask) ==
64094 + (ntohl(ipp->addr) & ipp->netmask)))
64095 + return match;
64096 + }
64097 + goto try_group;
64098 + }
64099 +
64100 + return match;
64101 +}
64102 +
64103 +static struct acl_role_label *
64104 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
64105 + const gid_t gid)
64106 +{
64107 + return __lookup_acl_role_label(&running_polstate, task, uid, gid);
64108 +}
64109 +
64110 +struct acl_subject_label *
64111 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
64112 + const struct acl_role_label *role)
64113 +{
64114 + unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
64115 + struct acl_subject_label *match;
64116 +
64117 + match = role->subj_hash[index];
64118 +
64119 + while (match && (match->inode != ino || match->device != dev ||
64120 + (match->mode & GR_DELETED))) {
64121 + match = match->next;
64122 + }
64123 +
64124 + if (match && !(match->mode & GR_DELETED))
64125 + return match;
64126 + else
64127 + return NULL;
64128 +}
64129 +
64130 +struct acl_subject_label *
64131 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
64132 + const struct acl_role_label *role)
64133 +{
64134 + unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
64135 + struct acl_subject_label *match;
64136 +
64137 + match = role->subj_hash[index];
64138 +
64139 + while (match && (match->inode != ino || match->device != dev ||
64140 + !(match->mode & GR_DELETED))) {
64141 + match = match->next;
64142 + }
64143 +
64144 + if (match && (match->mode & GR_DELETED))
64145 + return match;
64146 + else
64147 + return NULL;
64148 +}
64149 +
64150 +static struct acl_object_label *
64151 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
64152 + const struct acl_subject_label *subj)
64153 +{
64154 + unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
64155 + struct acl_object_label *match;
64156 +
64157 + match = subj->obj_hash[index];
64158 +
64159 + while (match && (match->inode != ino || match->device != dev ||
64160 + (match->mode & GR_DELETED))) {
64161 + match = match->next;
64162 + }
64163 +
64164 + if (match && !(match->mode & GR_DELETED))
64165 + return match;
64166 + else
64167 + return NULL;
64168 +}
64169 +
64170 +static struct acl_object_label *
64171 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
64172 + const struct acl_subject_label *subj)
64173 +{
64174 + unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
64175 + struct acl_object_label *match;
64176 +
64177 + match = subj->obj_hash[index];
64178 +
64179 + while (match && (match->inode != ino || match->device != dev ||
64180 + !(match->mode & GR_DELETED))) {
64181 + match = match->next;
64182 + }
64183 +
64184 + if (match && (match->mode & GR_DELETED))
64185 + return match;
64186 +
64187 + match = subj->obj_hash[index];
64188 +
64189 + while (match && (match->inode != ino || match->device != dev ||
64190 + (match->mode & GR_DELETED))) {
64191 + match = match->next;
64192 + }
64193 +
64194 + if (match && !(match->mode & GR_DELETED))
64195 + return match;
64196 + else
64197 + return NULL;
64198 +}
64199 +
64200 +struct name_entry *
64201 +__lookup_name_entry(const struct gr_policy_state *state, const char *name)
64202 +{
64203 + unsigned int len = strlen(name);
64204 + unsigned int key = full_name_hash(name, len);
64205 + unsigned int index = key % state->name_set.n_size;
64206 + struct name_entry *match;
64207 +
64208 + match = state->name_set.n_hash[index];
64209 +
64210 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
64211 + match = match->next;
64212 +
64213 + return match;
64214 +}
64215 +
64216 +static struct name_entry *
64217 +lookup_name_entry(const char *name)
64218 +{
64219 + return __lookup_name_entry(&running_polstate, name);
64220 +}
64221 +
64222 +static struct name_entry *
64223 +lookup_name_entry_create(const char *name)
64224 +{
64225 + unsigned int len = strlen(name);
64226 + unsigned int key = full_name_hash(name, len);
64227 + unsigned int index = key % running_polstate.name_set.n_size;
64228 + struct name_entry *match;
64229 +
64230 + match = running_polstate.name_set.n_hash[index];
64231 +
64232 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
64233 + !match->deleted))
64234 + match = match->next;
64235 +
64236 + if (match && match->deleted)
64237 + return match;
64238 +
64239 + match = running_polstate.name_set.n_hash[index];
64240 +
64241 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
64242 + match->deleted))
64243 + match = match->next;
64244 +
64245 + if (match && !match->deleted)
64246 + return match;
64247 + else
64248 + return NULL;
64249 +}
64250 +
64251 +static struct inodev_entry *
64252 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
64253 +{
64254 + unsigned int index = gr_fhash(ino, dev, running_polstate.inodev_set.i_size);
64255 + struct inodev_entry *match;
64256 +
64257 + match = running_polstate.inodev_set.i_hash[index];
64258 +
64259 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
64260 + match = match->next;
64261 +
64262 + return match;
64263 +}
64264 +
64265 +void
64266 +__insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry)
64267 +{
64268 + unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
64269 + state->inodev_set.i_size);
64270 + struct inodev_entry **curr;
64271 +
64272 + entry->prev = NULL;
64273 +
64274 + curr = &state->inodev_set.i_hash[index];
64275 + if (*curr != NULL)
64276 + (*curr)->prev = entry;
64277 +
64278 + entry->next = *curr;
64279 + *curr = entry;
64280 +
64281 + return;
64282 +}
64283 +
64284 +static void
64285 +insert_inodev_entry(struct inodev_entry *entry)
64286 +{
64287 + __insert_inodev_entry(&running_polstate, entry);
64288 +}
64289 +
64290 +void
64291 +insert_acl_obj_label(struct acl_object_label *obj,
64292 + struct acl_subject_label *subj)
64293 +{
64294 + unsigned int index =
64295 + gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
64296 + struct acl_object_label **curr;
64297 +
64298 + obj->prev = NULL;
64299 +
64300 + curr = &subj->obj_hash[index];
64301 + if (*curr != NULL)
64302 + (*curr)->prev = obj;
64303 +
64304 + obj->next = *curr;
64305 + *curr = obj;
64306 +
64307 + return;
64308 +}
64309 +
64310 +void
64311 +insert_acl_subj_label(struct acl_subject_label *obj,
64312 + struct acl_role_label *role)
64313 +{
64314 + unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
64315 + struct acl_subject_label **curr;
64316 +
64317 + obj->prev = NULL;
64318 +
64319 + curr = &role->subj_hash[index];
64320 + if (*curr != NULL)
64321 + (*curr)->prev = obj;
64322 +
64323 + obj->next = *curr;
64324 + *curr = obj;
64325 +
64326 + return;
64327 +}
64328 +
64329 +/* derived from glibc fnmatch() 0: match, 1: no match*/
64330 +
64331 +static int
64332 +glob_match(const char *p, const char *n)
64333 +{
64334 + char c;
64335 +
64336 + while ((c = *p++) != '\0') {
64337 + switch (c) {
64338 + case '?':
64339 + if (*n == '\0')
64340 + return 1;
64341 + else if (*n == '/')
64342 + return 1;
64343 + break;
64344 + case '\\':
64345 + if (*n != c)
64346 + return 1;
64347 + break;
64348 + case '*':
64349 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
64350 + if (*n == '/')
64351 + return 1;
64352 + else if (c == '?') {
64353 + if (*n == '\0')
64354 + return 1;
64355 + else
64356 + ++n;
64357 + }
64358 + }
64359 + if (c == '\0') {
64360 + return 0;
64361 + } else {
64362 + const char *endp;
64363 +
64364 + if ((endp = strchr(n, '/')) == NULL)
64365 + endp = n + strlen(n);
64366 +
64367 + if (c == '[') {
64368 + for (--p; n < endp; ++n)
64369 + if (!glob_match(p, n))
64370 + return 0;
64371 + } else if (c == '/') {
64372 + while (*n != '\0' && *n != '/')
64373 + ++n;
64374 + if (*n == '/' && !glob_match(p, n + 1))
64375 + return 0;
64376 + } else {
64377 + for (--p; n < endp; ++n)
64378 + if (*n == c && !glob_match(p, n))
64379 + return 0;
64380 + }
64381 +
64382 + return 1;
64383 + }
64384 + case '[':
64385 + {
64386 + int not;
64387 + char cold;
64388 +
64389 + if (*n == '\0' || *n == '/')
64390 + return 1;
64391 +
64392 + not = (*p == '!' || *p == '^');
64393 + if (not)
64394 + ++p;
64395 +
64396 + c = *p++;
64397 + for (;;) {
64398 + unsigned char fn = (unsigned char)*n;
64399 +
64400 + if (c == '\0')
64401 + return 1;
64402 + else {
64403 + if (c == fn)
64404 + goto matched;
64405 + cold = c;
64406 + c = *p++;
64407 +
64408 + if (c == '-' && *p != ']') {
64409 + unsigned char cend = *p++;
64410 +
64411 + if (cend == '\0')
64412 + return 1;
64413 +
64414 + if (cold <= fn && fn <= cend)
64415 + goto matched;
64416 +
64417 + c = *p++;
64418 + }
64419 + }
64420 +
64421 + if (c == ']')
64422 + break;
64423 + }
64424 + if (!not)
64425 + return 1;
64426 + break;
64427 + matched:
64428 + while (c != ']') {
64429 + if (c == '\0')
64430 + return 1;
64431 +
64432 + c = *p++;
64433 + }
64434 + if (not)
64435 + return 1;
64436 + }
64437 + break;
64438 + default:
64439 + if (c != *n)
64440 + return 1;
64441 + }
64442 +
64443 + ++n;
64444 + }
64445 +
64446 + if (*n == '\0')
64447 + return 0;
64448 +
64449 + if (*n == '/')
64450 + return 0;
64451 +
64452 + return 1;
64453 +}
64454 +
64455 +static struct acl_object_label *
64456 +chk_glob_label(struct acl_object_label *globbed,
64457 + const struct dentry *dentry, const struct vfsmount *mnt, char **path)
64458 +{
64459 + struct acl_object_label *tmp;
64460 +
64461 + if (*path == NULL)
64462 + *path = gr_to_filename_nolock(dentry, mnt);
64463 +
64464 + tmp = globbed;
64465 +
64466 + while (tmp) {
64467 + if (!glob_match(tmp->filename, *path))
64468 + return tmp;
64469 + tmp = tmp->next;
64470 + }
64471 +
64472 + return NULL;
64473 +}
64474 +
64475 +static struct acl_object_label *
64476 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
64477 + const ino_t curr_ino, const dev_t curr_dev,
64478 + const struct acl_subject_label *subj, char **path, const int checkglob)
64479 +{
64480 + struct acl_subject_label *tmpsubj;
64481 + struct acl_object_label *retval;
64482 + struct acl_object_label *retval2;
64483 +
64484 + tmpsubj = (struct acl_subject_label *) subj;
64485 + read_lock(&gr_inode_lock);
64486 + do {
64487 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
64488 + if (retval) {
64489 + if (checkglob && retval->globbed) {
64490 + retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
64491 + if (retval2)
64492 + retval = retval2;
64493 + }
64494 + break;
64495 + }
64496 + } while ((tmpsubj = tmpsubj->parent_subject));
64497 + read_unlock(&gr_inode_lock);
64498 +
64499 + return retval;
64500 +}
64501 +
64502 +static __inline__ struct acl_object_label *
64503 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
64504 + struct dentry *curr_dentry,
64505 + const struct acl_subject_label *subj, char **path, const int checkglob)
64506 +{
64507 + int newglob = checkglob;
64508 + ino_t inode;
64509 + dev_t device;
64510 +
64511 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
64512 + as we don't want a / * rule to match instead of the / object
64513 + don't do this for create lookups that call this function though, since they're looking up
64514 + on the parent and thus need globbing checks on all paths
64515 + */
64516 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
64517 + newglob = GR_NO_GLOB;
64518 +
64519 + spin_lock(&curr_dentry->d_lock);
64520 + inode = curr_dentry->d_inode->i_ino;
64521 + device = __get_dev(curr_dentry);
64522 + spin_unlock(&curr_dentry->d_lock);
64523 +
64524 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
64525 +}
64526 +
64527 +#ifdef CONFIG_HUGETLBFS
64528 +static inline bool
64529 +is_hugetlbfs_mnt(const struct vfsmount *mnt)
64530 +{
64531 + int i;
64532 + for (i = 0; i < HUGE_MAX_HSTATE; i++) {
64533 + if (unlikely(hugetlbfs_vfsmount[i] == mnt))
64534 + return true;
64535 + }
64536 +
64537 + return false;
64538 +}
64539 +#endif
64540 +
64541 +static struct acl_object_label *
64542 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64543 + const struct acl_subject_label *subj, char *path, const int checkglob)
64544 +{
64545 + struct dentry *dentry = (struct dentry *) l_dentry;
64546 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
64547 + struct mount *real_mnt = real_mount(mnt);
64548 + struct acl_object_label *retval;
64549 + struct dentry *parent;
64550 +
64551 + br_read_lock(&vfsmount_lock);
64552 + write_seqlock(&rename_lock);
64553 +
64554 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
64555 +#ifdef CONFIG_NET
64556 + mnt == sock_mnt ||
64557 +#endif
64558 +#ifdef CONFIG_HUGETLBFS
64559 + (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
64560 +#endif
64561 + /* ignore Eric Biederman */
64562 + IS_PRIVATE(l_dentry->d_inode))) {
64563 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
64564 + goto out;
64565 + }
64566 +
64567 + for (;;) {
64568 + if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
64569 + break;
64570 +
64571 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
64572 + if (!mnt_has_parent(real_mnt))
64573 + break;
64574 +
64575 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
64576 + if (retval != NULL)
64577 + goto out;
64578 +
64579 + dentry = real_mnt->mnt_mountpoint;
64580 + real_mnt = real_mnt->mnt_parent;
64581 + mnt = &real_mnt->mnt;
64582 + continue;
64583 + }
64584 +
64585 + parent = dentry->d_parent;
64586 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
64587 + if (retval != NULL)
64588 + goto out;
64589 +
64590 + dentry = parent;
64591 + }
64592 +
64593 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
64594 +
64595 + /* gr_real_root is pinned so we don't have to hold a reference */
64596 + if (retval == NULL)
64597 + retval = full_lookup(l_dentry, l_mnt, gr_real_root.dentry, subj, &path, checkglob);
64598 +out:
64599 + write_sequnlock(&rename_lock);
64600 + br_read_unlock(&vfsmount_lock);
64601 +
64602 + BUG_ON(retval == NULL);
64603 +
64604 + return retval;
64605 +}
64606 +
64607 +static __inline__ struct acl_object_label *
64608 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64609 + const struct acl_subject_label *subj)
64610 +{
64611 + char *path = NULL;
64612 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
64613 +}
64614 +
64615 +static __inline__ struct acl_object_label *
64616 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64617 + const struct acl_subject_label *subj)
64618 +{
64619 + char *path = NULL;
64620 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
64621 +}
64622 +
64623 +static __inline__ struct acl_object_label *
64624 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64625 + const struct acl_subject_label *subj, char *path)
64626 +{
64627 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
64628 +}
64629 +
64630 +struct acl_subject_label *
64631 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
64632 + const struct acl_role_label *role)
64633 +{
64634 + struct dentry *dentry = (struct dentry *) l_dentry;
64635 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
64636 + struct mount *real_mnt = real_mount(mnt);
64637 + struct acl_subject_label *retval;
64638 + struct dentry *parent;
64639 +
64640 + br_read_lock(&vfsmount_lock);
64641 + write_seqlock(&rename_lock);
64642 +
64643 + for (;;) {
64644 + if (dentry == gr_real_root.dentry && mnt == gr_real_root.mnt)
64645 + break;
64646 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
64647 + if (!mnt_has_parent(real_mnt))
64648 + break;
64649 +
64650 + spin_lock(&dentry->d_lock);
64651 + read_lock(&gr_inode_lock);
64652 + retval =
64653 + lookup_acl_subj_label(dentry->d_inode->i_ino,
64654 + __get_dev(dentry), role);
64655 + read_unlock(&gr_inode_lock);
64656 + spin_unlock(&dentry->d_lock);
64657 + if (retval != NULL)
64658 + goto out;
64659 +
64660 + dentry = real_mnt->mnt_mountpoint;
64661 + real_mnt = real_mnt->mnt_parent;
64662 + mnt = &real_mnt->mnt;
64663 + continue;
64664 + }
64665 +
64666 + spin_lock(&dentry->d_lock);
64667 + read_lock(&gr_inode_lock);
64668 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
64669 + __get_dev(dentry), role);
64670 + read_unlock(&gr_inode_lock);
64671 + parent = dentry->d_parent;
64672 + spin_unlock(&dentry->d_lock);
64673 +
64674 + if (retval != NULL)
64675 + goto out;
64676 +
64677 + dentry = parent;
64678 + }
64679 +
64680 + spin_lock(&dentry->d_lock);
64681 + read_lock(&gr_inode_lock);
64682 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
64683 + __get_dev(dentry), role);
64684 + read_unlock(&gr_inode_lock);
64685 + spin_unlock(&dentry->d_lock);
64686 +
64687 + if (unlikely(retval == NULL)) {
64688 + /* gr_real_root is pinned, we don't need to hold a reference */
64689 + read_lock(&gr_inode_lock);
64690 + retval = lookup_acl_subj_label(gr_real_root.dentry->d_inode->i_ino,
64691 + __get_dev(gr_real_root.dentry), role);
64692 + read_unlock(&gr_inode_lock);
64693 + }
64694 +out:
64695 + write_sequnlock(&rename_lock);
64696 + br_read_unlock(&vfsmount_lock);
64697 +
64698 + BUG_ON(retval == NULL);
64699 +
64700 + return retval;
64701 +}
64702 +
64703 +void
64704 +assign_special_role(const char *rolename)
64705 +{
64706 + struct acl_object_label *obj;
64707 + struct acl_role_label *r;
64708 + struct acl_role_label *assigned = NULL;
64709 + struct task_struct *tsk;
64710 + struct file *filp;
64711 +
64712 + FOR_EACH_ROLE_START(r)
64713 + if (!strcmp(rolename, r->rolename) &&
64714 + (r->roletype & GR_ROLE_SPECIAL)) {
64715 + assigned = r;
64716 + break;
64717 + }
64718 + FOR_EACH_ROLE_END(r)
64719 +
64720 + if (!assigned)
64721 + return;
64722 +
64723 + read_lock(&tasklist_lock);
64724 + read_lock(&grsec_exec_file_lock);
64725 +
64726 + tsk = current->real_parent;
64727 + if (tsk == NULL)
64728 + goto out_unlock;
64729 +
64730 + filp = tsk->exec_file;
64731 + if (filp == NULL)
64732 + goto out_unlock;
64733 +
64734 + tsk->is_writable = 0;
64735 + tsk->inherited = 0;
64736 +
64737 + tsk->acl_sp_role = 1;
64738 + tsk->acl_role_id = ++acl_sp_role_value;
64739 + tsk->role = assigned;
64740 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
64741 +
64742 + /* ignore additional mmap checks for processes that are writable
64743 + by the default ACL */
64744 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
64745 + if (unlikely(obj->mode & GR_WRITE))
64746 + tsk->is_writable = 1;
64747 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
64748 + if (unlikely(obj->mode & GR_WRITE))
64749 + tsk->is_writable = 1;
64750 +
64751 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
64752 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename,
64753 + tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
64754 +#endif
64755 +
64756 +out_unlock:
64757 + read_unlock(&grsec_exec_file_lock);
64758 + read_unlock(&tasklist_lock);
64759 + return;
64760 +}
64761 +
64762 +
64763 +static void
64764 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
64765 +{
64766 + struct task_struct *task = current;
64767 + const struct cred *cred = current_cred();
64768 +
64769 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
64770 + GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
64771 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
64772 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
64773 +
64774 + return;
64775 +}
64776 +
64777 +static void
64778 +gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
64779 +{
64780 + struct task_struct *task = current;
64781 + const struct cred *cred = current_cred();
64782 +
64783 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
64784 + GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
64785 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
64786 + 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
64787 +
64788 + return;
64789 +}
64790 +
64791 +static void
64792 +gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
64793 +{
64794 + struct task_struct *task = current;
64795 + const struct cred *cred = current_cred();
64796 +
64797 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
64798 + GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
64799 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
64800 + 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
64801 +
64802 + return;
64803 +}
64804 +
64805 +static void
64806 +gr_set_proc_res(struct task_struct *task)
64807 +{
64808 + struct acl_subject_label *proc;
64809 + unsigned short i;
64810 +
64811 + proc = task->acl;
64812 +
64813 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
64814 + return;
64815 +
64816 + for (i = 0; i < RLIM_NLIMITS; i++) {
64817 + if (!(proc->resmask & (1U << i)))
64818 + continue;
64819 +
64820 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
64821 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
64822 +
64823 + if (i == RLIMIT_CPU)
64824 + update_rlimit_cpu(task, proc->res[i].rlim_cur);
64825 + }
64826 +
64827 + return;
64828 +}
64829 +
64830 +/* both of the below must be called with
64831 + rcu_read_lock();
64832 + read_lock(&tasklist_lock);
64833 + read_lock(&grsec_exec_file_lock);
64834 +*/
64835 +
64836 +struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename)
64837 +{
64838 + char *tmpname;
64839 + struct acl_subject_label *tmpsubj;
64840 + struct file *filp;
64841 + struct name_entry *nmatch;
64842 +
64843 + filp = task->exec_file;
64844 + if (filp == NULL)
64845 + return NULL;
64846 +
64847 + /* the following is to apply the correct subject
64848 + on binaries running when the RBAC system
64849 + is enabled, when the binaries have been
64850 + replaced or deleted since their execution
64851 + -----
64852 + when the RBAC system starts, the inode/dev
64853 + from exec_file will be one the RBAC system
64854 + is unaware of. It only knows the inode/dev
64855 + of the present file on disk, or the absence
64856 + of it.
64857 + */
64858 +
64859 + if (filename)
64860 + nmatch = __lookup_name_entry(state, filename);
64861 + else {
64862 + preempt_disable();
64863 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
64864 +
64865 + nmatch = __lookup_name_entry(state, tmpname);
64866 + preempt_enable();
64867 + }
64868 + tmpsubj = NULL;
64869 + if (nmatch) {
64870 + if (nmatch->deleted)
64871 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
64872 + else
64873 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
64874 + }
64875 + /* this also works for the reload case -- if we don't match a potentially inherited subject
64876 + then we fall back to a normal lookup based on the binary's ino/dev
64877 + */
64878 + if (tmpsubj == NULL)
64879 + tmpsubj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, task->role);
64880 +
64881 + return tmpsubj;
64882 +}
64883 +
64884 +static struct acl_subject_label *gr_get_subject_for_task(struct task_struct *task, const char *filename)
64885 +{
64886 + return __gr_get_subject_for_task(&running_polstate, task, filename);
64887 +}
64888 +
64889 +void __gr_apply_subject_to_task(const struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj)
64890 +{
64891 + struct acl_object_label *obj;
64892 + struct file *filp;
64893 +
64894 + filp = task->exec_file;
64895 +
64896 + task->acl = subj;
64897 + task->is_writable = 0;
64898 + /* ignore additional mmap checks for processes that are writable
64899 + by the default ACL */
64900 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, state->default_role->root_label);
64901 + if (unlikely(obj->mode & GR_WRITE))
64902 + task->is_writable = 1;
64903 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
64904 + if (unlikely(obj->mode & GR_WRITE))
64905 + task->is_writable = 1;
64906 +
64907 + gr_set_proc_res(task);
64908 +
64909 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
64910 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
64911 +#endif
64912 +}
64913 +
64914 +static void gr_apply_subject_to_task(struct task_struct *task, struct acl_subject_label *subj)
64915 +{
64916 + __gr_apply_subject_to_task(&running_polstate, task, subj);
64917 +}
64918 +
64919 +__u32
64920 +gr_search_file(const struct dentry * dentry, const __u32 mode,
64921 + const struct vfsmount * mnt)
64922 +{
64923 + __u32 retval = mode;
64924 + struct acl_subject_label *curracl;
64925 + struct acl_object_label *currobj;
64926 +
64927 + if (unlikely(!(gr_status & GR_READY)))
64928 + return (mode & ~GR_AUDITS);
64929 +
64930 + curracl = current->acl;
64931 +
64932 + currobj = chk_obj_label(dentry, mnt, curracl);
64933 + retval = currobj->mode & mode;
64934 +
64935 + /* if we're opening a specified transfer file for writing
64936 + (e.g. /dev/initctl), then transfer our role to init
64937 + */
64938 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
64939 + current->role->roletype & GR_ROLE_PERSIST)) {
64940 + struct task_struct *task = init_pid_ns.child_reaper;
64941 +
64942 + if (task->role != current->role) {
64943 + struct acl_subject_label *subj;
64944 +
64945 + task->acl_sp_role = 0;
64946 + task->acl_role_id = current->acl_role_id;
64947 + task->role = current->role;
64948 + rcu_read_lock();
64949 + read_lock(&grsec_exec_file_lock);
64950 + subj = gr_get_subject_for_task(task, NULL);
64951 + gr_apply_subject_to_task(task, subj);
64952 + read_unlock(&grsec_exec_file_lock);
64953 + rcu_read_unlock();
64954 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
64955 + }
64956 + }
64957 +
64958 + if (unlikely
64959 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
64960 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
64961 + __u32 new_mode = mode;
64962 +
64963 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
64964 +
64965 + retval = new_mode;
64966 +
64967 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
64968 + new_mode |= GR_INHERIT;
64969 +
64970 + if (!(mode & GR_NOLEARN))
64971 + gr_log_learn(dentry, mnt, new_mode);
64972 + }
64973 +
64974 + return retval;
64975 +}
64976 +
64977 +struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
64978 + const struct dentry *parent,
64979 + const struct vfsmount *mnt)
64980 +{
64981 + struct name_entry *match;
64982 + struct acl_object_label *matchpo;
64983 + struct acl_subject_label *curracl;
64984 + char *path;
64985 +
64986 + if (unlikely(!(gr_status & GR_READY)))
64987 + return NULL;
64988 +
64989 + preempt_disable();
64990 + path = gr_to_filename_rbac(new_dentry, mnt);
64991 + match = lookup_name_entry_create(path);
64992 +
64993 + curracl = current->acl;
64994 +
64995 + if (match) {
64996 + read_lock(&gr_inode_lock);
64997 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
64998 + read_unlock(&gr_inode_lock);
64999 +
65000 + if (matchpo) {
65001 + preempt_enable();
65002 + return matchpo;
65003 + }
65004 + }
65005 +
65006 + // lookup parent
65007 +
65008 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
65009 +
65010 + preempt_enable();
65011 + return matchpo;
65012 +}
65013 +
65014 +__u32
65015 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
65016 + const struct vfsmount * mnt, const __u32 mode)
65017 +{
65018 + struct acl_object_label *matchpo;
65019 + __u32 retval;
65020 +
65021 + if (unlikely(!(gr_status & GR_READY)))
65022 + return (mode & ~GR_AUDITS);
65023 +
65024 + matchpo = gr_get_create_object(new_dentry, parent, mnt);
65025 +
65026 + retval = matchpo->mode & mode;
65027 +
65028 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
65029 + && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
65030 + __u32 new_mode = mode;
65031 +
65032 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
65033 +
65034 + gr_log_learn(new_dentry, mnt, new_mode);
65035 + return new_mode;
65036 + }
65037 +
65038 + return retval;
65039 +}
65040 +
65041 +__u32
65042 +gr_check_link(const struct dentry * new_dentry,
65043 + const struct dentry * parent_dentry,
65044 + const struct vfsmount * parent_mnt,
65045 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
65046 +{
65047 + struct acl_object_label *obj;
65048 + __u32 oldmode, newmode;
65049 + __u32 needmode;
65050 + __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
65051 + GR_DELETE | GR_INHERIT;
65052 +
65053 + if (unlikely(!(gr_status & GR_READY)))
65054 + return (GR_CREATE | GR_LINK);
65055 +
65056 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
65057 + oldmode = obj->mode;
65058 +
65059 + obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
65060 + newmode = obj->mode;
65061 +
65062 + needmode = newmode & checkmodes;
65063 +
65064 + // old name for hardlink must have at least the permissions of the new name
65065 + if ((oldmode & needmode) != needmode)
65066 + goto bad;
65067 +
65068 + // if old name had restrictions/auditing, make sure the new name does as well
65069 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
65070 +
65071 + // don't allow hardlinking of suid/sgid/fcapped files without permission
65072 + if (is_privileged_binary(old_dentry))
65073 + needmode |= GR_SETID;
65074 +
65075 + if ((newmode & needmode) != needmode)
65076 + goto bad;
65077 +
65078 + // enforce minimum permissions
65079 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
65080 + return newmode;
65081 +bad:
65082 + needmode = oldmode;
65083 + if (is_privileged_binary(old_dentry))
65084 + needmode |= GR_SETID;
65085 +
65086 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
65087 + gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
65088 + return (GR_CREATE | GR_LINK);
65089 + } else if (newmode & GR_SUPPRESS)
65090 + return GR_SUPPRESS;
65091 + else
65092 + return 0;
65093 +}
65094 +
65095 +int
65096 +gr_check_hidden_task(const struct task_struct *task)
65097 +{
65098 + if (unlikely(!(gr_status & GR_READY)))
65099 + return 0;
65100 +
65101 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
65102 + return 1;
65103 +
65104 + return 0;
65105 +}
65106 +
65107 +int
65108 +gr_check_protected_task(const struct task_struct *task)
65109 +{
65110 + if (unlikely(!(gr_status & GR_READY) || !task))
65111 + return 0;
65112 +
65113 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
65114 + task->acl != current->acl)
65115 + return 1;
65116 +
65117 + return 0;
65118 +}
65119 +
65120 +int
65121 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
65122 +{
65123 + struct task_struct *p;
65124 + int ret = 0;
65125 +
65126 + if (unlikely(!(gr_status & GR_READY) || !pid))
65127 + return ret;
65128 +
65129 + read_lock(&tasklist_lock);
65130 + do_each_pid_task(pid, type, p) {
65131 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
65132 + p->acl != current->acl) {
65133 + ret = 1;
65134 + goto out;
65135 + }
65136 + } while_each_pid_task(pid, type, p);
65137 +out:
65138 + read_unlock(&tasklist_lock);
65139 +
65140 + return ret;
65141 +}
65142 +
65143 +void
65144 +gr_copy_label(struct task_struct *tsk)
65145 +{
65146 + struct task_struct *p = current;
65147 +
65148 + tsk->inherited = p->inherited;
65149 + tsk->acl_sp_role = 0;
65150 + tsk->acl_role_id = p->acl_role_id;
65151 + tsk->acl = p->acl;
65152 + tsk->role = p->role;
65153 + tsk->signal->used_accept = 0;
65154 + tsk->signal->curr_ip = p->signal->curr_ip;
65155 + tsk->signal->saved_ip = p->signal->saved_ip;
65156 + if (p->exec_file)
65157 + get_file(p->exec_file);
65158 + tsk->exec_file = p->exec_file;
65159 + tsk->is_writable = p->is_writable;
65160 + if (unlikely(p->signal->used_accept)) {
65161 + p->signal->curr_ip = 0;
65162 + p->signal->saved_ip = 0;
65163 + }
65164 +
65165 + return;
65166 +}
65167 +
65168 +extern int gr_process_kernel_setuid_ban(struct user_struct *user);
65169 +
65170 +int
65171 +gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
65172 +{
65173 + unsigned int i;
65174 + __u16 num;
65175 + uid_t *uidlist;
65176 + uid_t curuid;
65177 + int realok = 0;
65178 + int effectiveok = 0;
65179 + int fsok = 0;
65180 + uid_t globalreal, globaleffective, globalfs;
65181 +
65182 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
65183 + struct user_struct *user;
65184 +
65185 + if (!uid_valid(real))
65186 + goto skipit;
65187 +
65188 + /* find user based on global namespace */
65189 +
65190 + globalreal = GR_GLOBAL_UID(real);
65191 +
65192 + user = find_user(make_kuid(&init_user_ns, globalreal));
65193 + if (user == NULL)
65194 + goto skipit;
65195 +
65196 + if (gr_process_kernel_setuid_ban(user)) {
65197 + /* for find_user */
65198 + free_uid(user);
65199 + return 1;
65200 + }
65201 +
65202 + /* for find_user */
65203 + free_uid(user);
65204 +
65205 +skipit:
65206 +#endif
65207 +
65208 + if (unlikely(!(gr_status & GR_READY)))
65209 + return 0;
65210 +
65211 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
65212 + gr_log_learn_uid_change(real, effective, fs);
65213 +
65214 + num = current->acl->user_trans_num;
65215 + uidlist = current->acl->user_transitions;
65216 +
65217 + if (uidlist == NULL)
65218 + return 0;
65219 +
65220 + if (!uid_valid(real)) {
65221 + realok = 1;
65222 + globalreal = (uid_t)-1;
65223 + } else {
65224 + globalreal = GR_GLOBAL_UID(real);
65225 + }
65226 + if (!uid_valid(effective)) {
65227 + effectiveok = 1;
65228 + globaleffective = (uid_t)-1;
65229 + } else {
65230 + globaleffective = GR_GLOBAL_UID(effective);
65231 + }
65232 + if (!uid_valid(fs)) {
65233 + fsok = 1;
65234 + globalfs = (uid_t)-1;
65235 + } else {
65236 + globalfs = GR_GLOBAL_UID(fs);
65237 + }
65238 +
65239 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
65240 + for (i = 0; i < num; i++) {
65241 + curuid = uidlist[i];
65242 + if (globalreal == curuid)
65243 + realok = 1;
65244 + if (globaleffective == curuid)
65245 + effectiveok = 1;
65246 + if (globalfs == curuid)
65247 + fsok = 1;
65248 + }
65249 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
65250 + for (i = 0; i < num; i++) {
65251 + curuid = uidlist[i];
65252 + if (globalreal == curuid)
65253 + break;
65254 + if (globaleffective == curuid)
65255 + break;
65256 + if (globalfs == curuid)
65257 + break;
65258 + }
65259 + /* not in deny list */
65260 + if (i == num) {
65261 + realok = 1;
65262 + effectiveok = 1;
65263 + fsok = 1;
65264 + }
65265 + }
65266 +
65267 + if (realok && effectiveok && fsok)
65268 + return 0;
65269 + else {
65270 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
65271 + return 1;
65272 + }
65273 +}
65274 +
65275 +int
65276 +gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
65277 +{
65278 + unsigned int i;
65279 + __u16 num;
65280 + gid_t *gidlist;
65281 + gid_t curgid;
65282 + int realok = 0;
65283 + int effectiveok = 0;
65284 + int fsok = 0;
65285 + gid_t globalreal, globaleffective, globalfs;
65286 +
65287 + if (unlikely(!(gr_status & GR_READY)))
65288 + return 0;
65289 +
65290 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
65291 + gr_log_learn_gid_change(real, effective, fs);
65292 +
65293 + num = current->acl->group_trans_num;
65294 + gidlist = current->acl->group_transitions;
65295 +
65296 + if (gidlist == NULL)
65297 + return 0;
65298 +
65299 + if (!gid_valid(real)) {
65300 + realok = 1;
65301 + globalreal = (gid_t)-1;
65302 + } else {
65303 + globalreal = GR_GLOBAL_GID(real);
65304 + }
65305 + if (!gid_valid(effective)) {
65306 + effectiveok = 1;
65307 + globaleffective = (gid_t)-1;
65308 + } else {
65309 + globaleffective = GR_GLOBAL_GID(effective);
65310 + }
65311 + if (!gid_valid(fs)) {
65312 + fsok = 1;
65313 + globalfs = (gid_t)-1;
65314 + } else {
65315 + globalfs = GR_GLOBAL_GID(fs);
65316 + }
65317 +
65318 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
65319 + for (i = 0; i < num; i++) {
65320 + curgid = gidlist[i];
65321 + if (globalreal == curgid)
65322 + realok = 1;
65323 + if (globaleffective == curgid)
65324 + effectiveok = 1;
65325 + if (globalfs == curgid)
65326 + fsok = 1;
65327 + }
65328 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
65329 + for (i = 0; i < num; i++) {
65330 + curgid = gidlist[i];
65331 + if (globalreal == curgid)
65332 + break;
65333 + if (globaleffective == curgid)
65334 + break;
65335 + if (globalfs == curgid)
65336 + break;
65337 + }
65338 + /* not in deny list */
65339 + if (i == num) {
65340 + realok = 1;
65341 + effectiveok = 1;
65342 + fsok = 1;
65343 + }
65344 + }
65345 +
65346 + if (realok && effectiveok && fsok)
65347 + return 0;
65348 + else {
65349 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
65350 + return 1;
65351 + }
65352 +}
65353 +
65354 +extern int gr_acl_is_capable(const int cap);
65355 +
65356 +void
65357 +gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
65358 +{
65359 + struct acl_role_label *role = task->role;
65360 + struct acl_subject_label *subj = NULL;
65361 + struct acl_object_label *obj;
65362 + struct file *filp;
65363 + uid_t uid;
65364 + gid_t gid;
65365 +
65366 + if (unlikely(!(gr_status & GR_READY)))
65367 + return;
65368 +
65369 + uid = GR_GLOBAL_UID(kuid);
65370 + gid = GR_GLOBAL_GID(kgid);
65371 +
65372 + filp = task->exec_file;
65373 +
65374 + /* kernel process, we'll give them the kernel role */
65375 + if (unlikely(!filp)) {
65376 + task->role = running_polstate.kernel_role;
65377 + task->acl = running_polstate.kernel_role->root_label;
65378 + return;
65379 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) {
65380 + /* save the current ip at time of role lookup so that the proper
65381 + IP will be learned for role_allowed_ip */
65382 + task->signal->saved_ip = task->signal->curr_ip;
65383 + role = lookup_acl_role_label(task, uid, gid);
65384 + }
65385 +
65386 + /* don't change the role if we're not a privileged process */
65387 + if (role && task->role != role &&
65388 + (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
65389 + ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
65390 + return;
65391 +
65392 + /* perform subject lookup in possibly new role
65393 + we can use this result below in the case where role == task->role
65394 + */
65395 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
65396 +
65397 + /* if we changed uid/gid, but result in the same role
65398 + and are using inheritance, don't lose the inherited subject
65399 + if current subject is other than what normal lookup
65400 + would result in, we arrived via inheritance, don't
65401 + lose subject
65402 + */
65403 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
65404 + (subj == task->acl)))
65405 + task->acl = subj;
65406 +
65407 + /* leave task->inherited unaffected */
65408 +
65409 + task->role = role;
65410 +
65411 + task->is_writable = 0;
65412 +
65413 + /* ignore additional mmap checks for processes that are writable
65414 + by the default ACL */
65415 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
65416 + if (unlikely(obj->mode & GR_WRITE))
65417 + task->is_writable = 1;
65418 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
65419 + if (unlikely(obj->mode & GR_WRITE))
65420 + task->is_writable = 1;
65421 +
65422 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
65423 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
65424 +#endif
65425 +
65426 + gr_set_proc_res(task);
65427 +
65428 + return;
65429 +}
65430 +
65431 +int
65432 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
65433 + const int unsafe_flags)
65434 +{
65435 + struct task_struct *task = current;
65436 + struct acl_subject_label *newacl;
65437 + struct acl_object_label *obj;
65438 + __u32 retmode;
65439 +
65440 + if (unlikely(!(gr_status & GR_READY)))
65441 + return 0;
65442 +
65443 + newacl = chk_subj_label(dentry, mnt, task->role);
65444 +
65445 + /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
65446 + did an exec
65447 + */
65448 + rcu_read_lock();
65449 + read_lock(&tasklist_lock);
65450 + if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
65451 + (task->parent->acl->mode & GR_POVERRIDE))) {
65452 + read_unlock(&tasklist_lock);
65453 + rcu_read_unlock();
65454 + goto skip_check;
65455 + }
65456 + read_unlock(&tasklist_lock);
65457 + rcu_read_unlock();
65458 +
65459 + if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
65460 + !(task->role->roletype & GR_ROLE_GOD) &&
65461 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
65462 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
65463 + if (unsafe_flags & LSM_UNSAFE_SHARE)
65464 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
65465 + else
65466 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
65467 + return -EACCES;
65468 + }
65469 +
65470 +skip_check:
65471 +
65472 + obj = chk_obj_label(dentry, mnt, task->acl);
65473 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
65474 +
65475 + if (!(task->acl->mode & GR_INHERITLEARN) &&
65476 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
65477 + if (obj->nested)
65478 + task->acl = obj->nested;
65479 + else
65480 + task->acl = newacl;
65481 + task->inherited = 0;
65482 + } else {
65483 + task->inherited = 1;
65484 + if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
65485 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
65486 + }
65487 +
65488 + task->is_writable = 0;
65489 +
65490 + /* ignore additional mmap checks for processes that are writable
65491 + by the default ACL */
65492 + obj = chk_obj_label(dentry, mnt, running_polstate.default_role->root_label);
65493 + if (unlikely(obj->mode & GR_WRITE))
65494 + task->is_writable = 1;
65495 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
65496 + if (unlikely(obj->mode & GR_WRITE))
65497 + task->is_writable = 1;
65498 +
65499 + gr_set_proc_res(task);
65500 +
65501 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
65502 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
65503 +#endif
65504 + return 0;
65505 +}
65506 +
65507 +/* always called with valid inodev ptr */
65508 +static void
65509 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
65510 +{
65511 + struct acl_object_label *matchpo;
65512 + struct acl_subject_label *matchps;
65513 + struct acl_subject_label *subj;
65514 + struct acl_role_label *role;
65515 + unsigned int x;
65516 +
65517 + FOR_EACH_ROLE_START(role)
65518 + FOR_EACH_SUBJECT_START(role, subj, x)
65519 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
65520 + matchpo->mode |= GR_DELETED;
65521 + FOR_EACH_SUBJECT_END(subj,x)
65522 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
65523 + /* nested subjects aren't in the role's subj_hash table */
65524 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
65525 + matchpo->mode |= GR_DELETED;
65526 + FOR_EACH_NESTED_SUBJECT_END(subj)
65527 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
65528 + matchps->mode |= GR_DELETED;
65529 + FOR_EACH_ROLE_END(role)
65530 +
65531 + inodev->nentry->deleted = 1;
65532 +
65533 + return;
65534 +}
65535 +
65536 +void
65537 +gr_handle_delete(const ino_t ino, const dev_t dev)
65538 +{
65539 + struct inodev_entry *inodev;
65540 +
65541 + if (unlikely(!(gr_status & GR_READY)))
65542 + return;
65543 +
65544 + write_lock(&gr_inode_lock);
65545 + inodev = lookup_inodev_entry(ino, dev);
65546 + if (inodev != NULL)
65547 + do_handle_delete(inodev, ino, dev);
65548 + write_unlock(&gr_inode_lock);
65549 +
65550 + return;
65551 +}
65552 +
65553 +static void
65554 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
65555 + const ino_t newinode, const dev_t newdevice,
65556 + struct acl_subject_label *subj)
65557 +{
65558 + unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
65559 + struct acl_object_label *match;
65560 +
65561 + match = subj->obj_hash[index];
65562 +
65563 + while (match && (match->inode != oldinode ||
65564 + match->device != olddevice ||
65565 + !(match->mode & GR_DELETED)))
65566 + match = match->next;
65567 +
65568 + if (match && (match->inode == oldinode)
65569 + && (match->device == olddevice)
65570 + && (match->mode & GR_DELETED)) {
65571 + if (match->prev == NULL) {
65572 + subj->obj_hash[index] = match->next;
65573 + if (match->next != NULL)
65574 + match->next->prev = NULL;
65575 + } else {
65576 + match->prev->next = match->next;
65577 + if (match->next != NULL)
65578 + match->next->prev = match->prev;
65579 + }
65580 + match->prev = NULL;
65581 + match->next = NULL;
65582 + match->inode = newinode;
65583 + match->device = newdevice;
65584 + match->mode &= ~GR_DELETED;
65585 +
65586 + insert_acl_obj_label(match, subj);
65587 + }
65588 +
65589 + return;
65590 +}
65591 +
65592 +static void
65593 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
65594 + const ino_t newinode, const dev_t newdevice,
65595 + struct acl_role_label *role)
65596 +{
65597 + unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
65598 + struct acl_subject_label *match;
65599 +
65600 + match = role->subj_hash[index];
65601 +
65602 + while (match && (match->inode != oldinode ||
65603 + match->device != olddevice ||
65604 + !(match->mode & GR_DELETED)))
65605 + match = match->next;
65606 +
65607 + if (match && (match->inode == oldinode)
65608 + && (match->device == olddevice)
65609 + && (match->mode & GR_DELETED)) {
65610 + if (match->prev == NULL) {
65611 + role->subj_hash[index] = match->next;
65612 + if (match->next != NULL)
65613 + match->next->prev = NULL;
65614 + } else {
65615 + match->prev->next = match->next;
65616 + if (match->next != NULL)
65617 + match->next->prev = match->prev;
65618 + }
65619 + match->prev = NULL;
65620 + match->next = NULL;
65621 + match->inode = newinode;
65622 + match->device = newdevice;
65623 + match->mode &= ~GR_DELETED;
65624 +
65625 + insert_acl_subj_label(match, role);
65626 + }
65627 +
65628 + return;
65629 +}
65630 +
65631 +static void
65632 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
65633 + const ino_t newinode, const dev_t newdevice)
65634 +{
65635 + unsigned int index = gr_fhash(oldinode, olddevice, running_polstate.inodev_set.i_size);
65636 + struct inodev_entry *match;
65637 +
65638 + match = running_polstate.inodev_set.i_hash[index];
65639 +
65640 + while (match && (match->nentry->inode != oldinode ||
65641 + match->nentry->device != olddevice || !match->nentry->deleted))
65642 + match = match->next;
65643 +
65644 + if (match && (match->nentry->inode == oldinode)
65645 + && (match->nentry->device == olddevice) &&
65646 + match->nentry->deleted) {
65647 + if (match->prev == NULL) {
65648 + running_polstate.inodev_set.i_hash[index] = match->next;
65649 + if (match->next != NULL)
65650 + match->next->prev = NULL;
65651 + } else {
65652 + match->prev->next = match->next;
65653 + if (match->next != NULL)
65654 + match->next->prev = match->prev;
65655 + }
65656 + match->prev = NULL;
65657 + match->next = NULL;
65658 + match->nentry->inode = newinode;
65659 + match->nentry->device = newdevice;
65660 + match->nentry->deleted = 0;
65661 +
65662 + insert_inodev_entry(match);
65663 + }
65664 +
65665 + return;
65666 +}
65667 +
65668 +static void
65669 +__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
65670 +{
65671 + struct acl_subject_label *subj;
65672 + struct acl_role_label *role;
65673 + unsigned int x;
65674 +
65675 + FOR_EACH_ROLE_START(role)
65676 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
65677 +
65678 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
65679 + if ((subj->inode == ino) && (subj->device == dev)) {
65680 + subj->inode = ino;
65681 + subj->device = dev;
65682 + }
65683 + /* nested subjects aren't in the role's subj_hash table */
65684 + update_acl_obj_label(matchn->inode, matchn->device,
65685 + ino, dev, subj);
65686 + FOR_EACH_NESTED_SUBJECT_END(subj)
65687 + FOR_EACH_SUBJECT_START(role, subj, x)
65688 + update_acl_obj_label(matchn->inode, matchn->device,
65689 + ino, dev, subj);
65690 + FOR_EACH_SUBJECT_END(subj,x)
65691 + FOR_EACH_ROLE_END(role)
65692 +
65693 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
65694 +
65695 + return;
65696 +}
65697 +
65698 +static void
65699 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
65700 + const struct vfsmount *mnt)
65701 +{
65702 + ino_t ino = dentry->d_inode->i_ino;
65703 + dev_t dev = __get_dev(dentry);
65704 +
65705 + __do_handle_create(matchn, ino, dev);
65706 +
65707 + return;
65708 +}
65709 +
65710 +void
65711 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
65712 +{
65713 + struct name_entry *matchn;
65714 +
65715 + if (unlikely(!(gr_status & GR_READY)))
65716 + return;
65717 +
65718 + preempt_disable();
65719 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
65720 +
65721 + if (unlikely((unsigned long)matchn)) {
65722 + write_lock(&gr_inode_lock);
65723 + do_handle_create(matchn, dentry, mnt);
65724 + write_unlock(&gr_inode_lock);
65725 + }
65726 + preempt_enable();
65727 +
65728 + return;
65729 +}
65730 +
65731 +void
65732 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
65733 +{
65734 + struct name_entry *matchn;
65735 +
65736 + if (unlikely(!(gr_status & GR_READY)))
65737 + return;
65738 +
65739 + preempt_disable();
65740 + matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
65741 +
65742 + if (unlikely((unsigned long)matchn)) {
65743 + write_lock(&gr_inode_lock);
65744 + __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
65745 + write_unlock(&gr_inode_lock);
65746 + }
65747 + preempt_enable();
65748 +
65749 + return;
65750 +}
65751 +
65752 +void
65753 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
65754 + struct dentry *old_dentry,
65755 + struct dentry *new_dentry,
65756 + struct vfsmount *mnt, const __u8 replace)
65757 +{
65758 + struct name_entry *matchn;
65759 + struct inodev_entry *inodev;
65760 + struct inode *inode = new_dentry->d_inode;
65761 + ino_t old_ino = old_dentry->d_inode->i_ino;
65762 + dev_t old_dev = __get_dev(old_dentry);
65763 +
65764 + /* vfs_rename swaps the name and parent link for old_dentry and
65765 + new_dentry
65766 + at this point, old_dentry has the new name, parent link, and inode
65767 + for the renamed file
65768 + if a file is being replaced by a rename, new_dentry has the inode
65769 + and name for the replaced file
65770 + */
65771 +
65772 + if (unlikely(!(gr_status & GR_READY)))
65773 + return;
65774 +
65775 + preempt_disable();
65776 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
65777 +
65778 + /* we wouldn't have to check d_inode if it weren't for
65779 + NFS silly-renaming
65780 + */
65781 +
65782 + write_lock(&gr_inode_lock);
65783 + if (unlikely(replace && inode)) {
65784 + ino_t new_ino = inode->i_ino;
65785 + dev_t new_dev = __get_dev(new_dentry);
65786 +
65787 + inodev = lookup_inodev_entry(new_ino, new_dev);
65788 + if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
65789 + do_handle_delete(inodev, new_ino, new_dev);
65790 + }
65791 +
65792 + inodev = lookup_inodev_entry(old_ino, old_dev);
65793 + if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
65794 + do_handle_delete(inodev, old_ino, old_dev);
65795 +
65796 + if (unlikely((unsigned long)matchn))
65797 + do_handle_create(matchn, old_dentry, mnt);
65798 +
65799 + write_unlock(&gr_inode_lock);
65800 + preempt_enable();
65801 +
65802 + return;
65803 +}
65804 +
65805 +#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
65806 +static const unsigned long res_learn_bumps[GR_NLIMITS] = {
65807 + [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
65808 + [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
65809 + [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
65810 + [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
65811 + [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
65812 + [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
65813 + [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
65814 + [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
65815 + [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
65816 + [RLIMIT_AS] = GR_RLIM_AS_BUMP,
65817 + [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
65818 + [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
65819 + [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
65820 + [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
65821 + [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
65822 + [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
65823 +};
65824 +
65825 +void
65826 +gr_learn_resource(const struct task_struct *task,
65827 + const int res, const unsigned long wanted, const int gt)
65828 +{
65829 + struct acl_subject_label *acl;
65830 + const struct cred *cred;
65831 +
65832 + if (unlikely((gr_status & GR_READY) &&
65833 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
65834 + goto skip_reslog;
65835 +
65836 + gr_log_resource(task, res, wanted, gt);
65837 +skip_reslog:
65838 +
65839 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
65840 + return;
65841 +
65842 + acl = task->acl;
65843 +
65844 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
65845 + !(acl->resmask & (1U << (unsigned short) res))))
65846 + return;
65847 +
65848 + if (wanted >= acl->res[res].rlim_cur) {
65849 + unsigned long res_add;
65850 +
65851 + res_add = wanted + res_learn_bumps[res];
65852 +
65853 + acl->res[res].rlim_cur = res_add;
65854 +
65855 + if (wanted > acl->res[res].rlim_max)
65856 + acl->res[res].rlim_max = res_add;
65857 +
65858 + /* only log the subject filename, since resource logging is supported for
65859 + single-subject learning only */
65860 + rcu_read_lock();
65861 + cred = __task_cred(task);
65862 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
65863 + task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
65864 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
65865 + "", (unsigned long) res, &task->signal->saved_ip);
65866 + rcu_read_unlock();
65867 + }
65868 +
65869 + return;
65870 +}
65871 +EXPORT_SYMBOL(gr_learn_resource);
65872 +#endif
65873 +
65874 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
65875 +void
65876 +pax_set_initial_flags(struct linux_binprm *bprm)
65877 +{
65878 + struct task_struct *task = current;
65879 + struct acl_subject_label *proc;
65880 + unsigned long flags;
65881 +
65882 + if (unlikely(!(gr_status & GR_READY)))
65883 + return;
65884 +
65885 + flags = pax_get_flags(task);
65886 +
65887 + proc = task->acl;
65888 +
65889 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
65890 + flags &= ~MF_PAX_PAGEEXEC;
65891 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
65892 + flags &= ~MF_PAX_SEGMEXEC;
65893 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
65894 + flags &= ~MF_PAX_RANDMMAP;
65895 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
65896 + flags &= ~MF_PAX_EMUTRAMP;
65897 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
65898 + flags &= ~MF_PAX_MPROTECT;
65899 +
65900 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
65901 + flags |= MF_PAX_PAGEEXEC;
65902 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
65903 + flags |= MF_PAX_SEGMEXEC;
65904 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
65905 + flags |= MF_PAX_RANDMMAP;
65906 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
65907 + flags |= MF_PAX_EMUTRAMP;
65908 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
65909 + flags |= MF_PAX_MPROTECT;
65910 +
65911 + pax_set_flags(task, flags);
65912 +
65913 + return;
65914 +}
65915 +#endif
65916 +
65917 +int
65918 +gr_handle_proc_ptrace(struct task_struct *task)
65919 +{
65920 + struct file *filp;
65921 + struct task_struct *tmp = task;
65922 + struct task_struct *curtemp = current;
65923 + __u32 retmode;
65924 +
65925 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
65926 + if (unlikely(!(gr_status & GR_READY)))
65927 + return 0;
65928 +#endif
65929 +
65930 + read_lock(&tasklist_lock);
65931 + read_lock(&grsec_exec_file_lock);
65932 + filp = task->exec_file;
65933 +
65934 + while (task_pid_nr(tmp) > 0) {
65935 + if (tmp == curtemp)
65936 + break;
65937 + tmp = tmp->real_parent;
65938 + }
65939 +
65940 + if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
65941 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
65942 + read_unlock(&grsec_exec_file_lock);
65943 + read_unlock(&tasklist_lock);
65944 + return 1;
65945 + }
65946 +
65947 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65948 + if (!(gr_status & GR_READY)) {
65949 + read_unlock(&grsec_exec_file_lock);
65950 + read_unlock(&tasklist_lock);
65951 + return 0;
65952 + }
65953 +#endif
65954 +
65955 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
65956 + read_unlock(&grsec_exec_file_lock);
65957 + read_unlock(&tasklist_lock);
65958 +
65959 + if (retmode & GR_NOPTRACE)
65960 + return 1;
65961 +
65962 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
65963 + && (current->acl != task->acl || (current->acl != current->role->root_label
65964 + && task_pid_nr(current) != task_pid_nr(task))))
65965 + return 1;
65966 +
65967 + return 0;
65968 +}
65969 +
65970 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
65971 +{
65972 + if (unlikely(!(gr_status & GR_READY)))
65973 + return;
65974 +
65975 + if (!(current->role->roletype & GR_ROLE_GOD))
65976 + return;
65977 +
65978 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
65979 + p->role->rolename, gr_task_roletype_to_char(p),
65980 + p->acl->filename);
65981 +}
65982 +
65983 +int
65984 +gr_handle_ptrace(struct task_struct *task, const long request)
65985 +{
65986 + struct task_struct *tmp = task;
65987 + struct task_struct *curtemp = current;
65988 + __u32 retmode;
65989 +
65990 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
65991 + if (unlikely(!(gr_status & GR_READY)))
65992 + return 0;
65993 +#endif
65994 + if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
65995 + read_lock(&tasklist_lock);
65996 + while (task_pid_nr(tmp) > 0) {
65997 + if (tmp == curtemp)
65998 + break;
65999 + tmp = tmp->real_parent;
66000 + }
66001 +
66002 + if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
66003 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
66004 + read_unlock(&tasklist_lock);
66005 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
66006 + return 1;
66007 + }
66008 + read_unlock(&tasklist_lock);
66009 + }
66010 +
66011 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
66012 + if (!(gr_status & GR_READY))
66013 + return 0;
66014 +#endif
66015 +
66016 + read_lock(&grsec_exec_file_lock);
66017 + if (unlikely(!task->exec_file)) {
66018 + read_unlock(&grsec_exec_file_lock);
66019 + return 0;
66020 + }
66021 +
66022 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
66023 + read_unlock(&grsec_exec_file_lock);
66024 +
66025 + if (retmode & GR_NOPTRACE) {
66026 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
66027 + return 1;
66028 + }
66029 +
66030 + if (retmode & GR_PTRACERD) {
66031 + switch (request) {
66032 + case PTRACE_SEIZE:
66033 + case PTRACE_POKETEXT:
66034 + case PTRACE_POKEDATA:
66035 + case PTRACE_POKEUSR:
66036 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
66037 + case PTRACE_SETREGS:
66038 + case PTRACE_SETFPREGS:
66039 +#endif
66040 +#ifdef CONFIG_X86
66041 + case PTRACE_SETFPXREGS:
66042 +#endif
66043 +#ifdef CONFIG_ALTIVEC
66044 + case PTRACE_SETVRREGS:
66045 +#endif
66046 + return 1;
66047 + default:
66048 + return 0;
66049 + }
66050 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
66051 + !(current->role->roletype & GR_ROLE_GOD) &&
66052 + (current->acl != task->acl)) {
66053 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
66054 + return 1;
66055 + }
66056 +
66057 + return 0;
66058 +}
66059 +
66060 +static int is_writable_mmap(const struct file *filp)
66061 +{
66062 + struct task_struct *task = current;
66063 + struct acl_object_label *obj, *obj2;
66064 +
66065 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
66066 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
66067 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, running_polstate.default_role->root_label);
66068 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
66069 + task->role->root_label);
66070 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
66071 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
66072 + return 1;
66073 + }
66074 + }
66075 + return 0;
66076 +}
66077 +
66078 +int
66079 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
66080 +{
66081 + __u32 mode;
66082 +
66083 + if (unlikely(!file || !(prot & PROT_EXEC)))
66084 + return 1;
66085 +
66086 + if (is_writable_mmap(file))
66087 + return 0;
66088 +
66089 + mode =
66090 + gr_search_file(file->f_path.dentry,
66091 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
66092 + file->f_path.mnt);
66093 +
66094 + if (!gr_tpe_allow(file))
66095 + return 0;
66096 +
66097 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
66098 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
66099 + return 0;
66100 + } else if (unlikely(!(mode & GR_EXEC))) {
66101 + return 0;
66102 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
66103 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
66104 + return 1;
66105 + }
66106 +
66107 + return 1;
66108 +}
66109 +
66110 +int
66111 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
66112 +{
66113 + __u32 mode;
66114 +
66115 + if (unlikely(!file || !(prot & PROT_EXEC)))
66116 + return 1;
66117 +
66118 + if (is_writable_mmap(file))
66119 + return 0;
66120 +
66121 + mode =
66122 + gr_search_file(file->f_path.dentry,
66123 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
66124 + file->f_path.mnt);
66125 +
66126 + if (!gr_tpe_allow(file))
66127 + return 0;
66128 +
66129 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
66130 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
66131 + return 0;
66132 + } else if (unlikely(!(mode & GR_EXEC))) {
66133 + return 0;
66134 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
66135 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
66136 + return 1;
66137 + }
66138 +
66139 + return 1;
66140 +}
66141 +
66142 +void
66143 +gr_acl_handle_psacct(struct task_struct *task, const long code)
66144 +{
66145 + unsigned long runtime;
66146 + unsigned long cputime;
66147 + unsigned int wday, cday;
66148 + __u8 whr, chr;
66149 + __u8 wmin, cmin;
66150 + __u8 wsec, csec;
66151 + struct timespec timeval;
66152 +
66153 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
66154 + !(task->acl->mode & GR_PROCACCT)))
66155 + return;
66156 +
66157 + do_posix_clock_monotonic_gettime(&timeval);
66158 + runtime = timeval.tv_sec - task->start_time.tv_sec;
66159 + wday = runtime / (3600 * 24);
66160 + runtime -= wday * (3600 * 24);
66161 + whr = runtime / 3600;
66162 + runtime -= whr * 3600;
66163 + wmin = runtime / 60;
66164 + runtime -= wmin * 60;
66165 + wsec = runtime;
66166 +
66167 + cputime = (task->utime + task->stime) / HZ;
66168 + cday = cputime / (3600 * 24);
66169 + cputime -= cday * (3600 * 24);
66170 + chr = cputime / 3600;
66171 + cputime -= chr * 3600;
66172 + cmin = cputime / 60;
66173 + cputime -= cmin * 60;
66174 + csec = cputime;
66175 +
66176 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
66177 +
66178 + return;
66179 +}
66180 +
66181 +#ifdef CONFIG_TASKSTATS
66182 +int gr_is_taskstats_denied(int pid)
66183 +{
66184 + struct task_struct *task;
66185 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66186 + const struct cred *cred;
66187 +#endif
66188 + int ret = 0;
66189 +
66190 + /* restrict taskstats viewing to un-chrooted root users
66191 + who have the 'view' subject flag if the RBAC system is enabled
66192 + */
66193 +
66194 + rcu_read_lock();
66195 + read_lock(&tasklist_lock);
66196 + task = find_task_by_vpid(pid);
66197 + if (task) {
66198 +#ifdef CONFIG_GRKERNSEC_CHROOT
66199 + if (proc_is_chrooted(task))
66200 + ret = -EACCES;
66201 +#endif
66202 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66203 + cred = __task_cred(task);
66204 +#ifdef CONFIG_GRKERNSEC_PROC_USER
66205 + if (gr_is_global_nonroot(cred->uid))
66206 + ret = -EACCES;
66207 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
66208 + if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
66209 + ret = -EACCES;
66210 +#endif
66211 +#endif
66212 + if (gr_status & GR_READY) {
66213 + if (!(task->acl->mode & GR_VIEW))
66214 + ret = -EACCES;
66215 + }
66216 + } else
66217 + ret = -ENOENT;
66218 +
66219 + read_unlock(&tasklist_lock);
66220 + rcu_read_unlock();
66221 +
66222 + return ret;
66223 +}
66224 +#endif
66225 +
66226 +/* AUXV entries are filled via a descendant of search_binary_handler
66227 + after we've already applied the subject for the target
66228 +*/
66229 +int gr_acl_enable_at_secure(void)
66230 +{
66231 + if (unlikely(!(gr_status & GR_READY)))
66232 + return 0;
66233 +
66234 + if (current->acl->mode & GR_ATSECURE)
66235 + return 1;
66236 +
66237 + return 0;
66238 +}
66239 +
66240 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
66241 +{
66242 + struct task_struct *task = current;
66243 + struct dentry *dentry = file->f_path.dentry;
66244 + struct vfsmount *mnt = file->f_path.mnt;
66245 + struct acl_object_label *obj, *tmp;
66246 + struct acl_subject_label *subj;
66247 + unsigned int bufsize;
66248 + int is_not_root;
66249 + char *path;
66250 + dev_t dev = __get_dev(dentry);
66251 +
66252 + if (unlikely(!(gr_status & GR_READY)))
66253 + return 1;
66254 +
66255 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
66256 + return 1;
66257 +
66258 + /* ignore Eric Biederman */
66259 + if (IS_PRIVATE(dentry->d_inode))
66260 + return 1;
66261 +
66262 + subj = task->acl;
66263 + read_lock(&gr_inode_lock);
66264 + do {
66265 + obj = lookup_acl_obj_label(ino, dev, subj);
66266 + if (obj != NULL) {
66267 + read_unlock(&gr_inode_lock);
66268 + return (obj->mode & GR_FIND) ? 1 : 0;
66269 + }
66270 + } while ((subj = subj->parent_subject));
66271 + read_unlock(&gr_inode_lock);
66272 +
66273 + /* this is purely an optimization since we're looking for an object
66274 + for the directory we're doing a readdir on
66275 + if it's possible for any globbed object to match the entry we're
66276 + filling into the directory, then the object we find here will be
66277 + an anchor point with attached globbed objects
66278 + */
66279 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
66280 + if (obj->globbed == NULL)
66281 + return (obj->mode & GR_FIND) ? 1 : 0;
66282 +
66283 + is_not_root = ((obj->filename[0] == '/') &&
66284 + (obj->filename[1] == '\0')) ? 0 : 1;
66285 + bufsize = PAGE_SIZE - namelen - is_not_root;
66286 +
66287 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
66288 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
66289 + return 1;
66290 +
66291 + preempt_disable();
66292 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
66293 + bufsize);
66294 +
66295 + bufsize = strlen(path);
66296 +
66297 + /* if base is "/", don't append an additional slash */
66298 + if (is_not_root)
66299 + *(path + bufsize) = '/';
66300 + memcpy(path + bufsize + is_not_root, name, namelen);
66301 + *(path + bufsize + namelen + is_not_root) = '\0';
66302 +
66303 + tmp = obj->globbed;
66304 + while (tmp) {
66305 + if (!glob_match(tmp->filename, path)) {
66306 + preempt_enable();
66307 + return (tmp->mode & GR_FIND) ? 1 : 0;
66308 + }
66309 + tmp = tmp->next;
66310 + }
66311 + preempt_enable();
66312 + return (obj->mode & GR_FIND) ? 1 : 0;
66313 +}
66314 +
66315 +void gr_put_exec_file(struct task_struct *task)
66316 +{
66317 + struct file *filp;
66318 +
66319 + write_lock(&grsec_exec_file_lock);
66320 + filp = task->exec_file;
66321 + task->exec_file = NULL;
66322 + write_unlock(&grsec_exec_file_lock);
66323 +
66324 + if (filp)
66325 + fput(filp);
66326 +
66327 + return;
66328 +}
66329 +
66330 +
66331 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
66332 +EXPORT_SYMBOL(gr_acl_is_enabled);
66333 +#endif
66334 +#ifdef CONFIG_SECURITY
66335 +EXPORT_SYMBOL(gr_check_user_change);
66336 +EXPORT_SYMBOL(gr_check_group_change);
66337 +#endif
66338 +
66339 diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
66340 new file mode 100644
66341 index 0000000..18ffbbd
66342 --- /dev/null
66343 +++ b/grsecurity/gracl_alloc.c
66344 @@ -0,0 +1,105 @@
66345 +#include <linux/kernel.h>
66346 +#include <linux/mm.h>
66347 +#include <linux/slab.h>
66348 +#include <linux/vmalloc.h>
66349 +#include <linux/gracl.h>
66350 +#include <linux/grsecurity.h>
66351 +
66352 +static struct gr_alloc_state __current_alloc_state = { 1, 1, NULL };
66353 +struct gr_alloc_state *current_alloc_state = &__current_alloc_state;
66354 +
66355 +static __inline__ int
66356 +alloc_pop(void)
66357 +{
66358 + if (current_alloc_state->alloc_stack_next == 1)
66359 + return 0;
66360 +
66361 + kfree(current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 2]);
66362 +
66363 + current_alloc_state->alloc_stack_next--;
66364 +
66365 + return 1;
66366 +}
66367 +
66368 +static __inline__ int
66369 +alloc_push(void *buf)
66370 +{
66371 + if (current_alloc_state->alloc_stack_next >= current_alloc_state->alloc_stack_size)
66372 + return 1;
66373 +
66374 + current_alloc_state->alloc_stack[current_alloc_state->alloc_stack_next - 1] = buf;
66375 +
66376 + current_alloc_state->alloc_stack_next++;
66377 +
66378 + return 0;
66379 +}
66380 +
66381 +void *
66382 +acl_alloc(unsigned long len)
66383 +{
66384 + void *ret = NULL;
66385 +
66386 + if (!len || len > PAGE_SIZE)
66387 + goto out;
66388 +
66389 + ret = kmalloc(len, GFP_KERNEL);
66390 +
66391 + if (ret) {
66392 + if (alloc_push(ret)) {
66393 + kfree(ret);
66394 + ret = NULL;
66395 + }
66396 + }
66397 +
66398 +out:
66399 + return ret;
66400 +}
66401 +
66402 +void *
66403 +acl_alloc_num(unsigned long num, unsigned long len)
66404 +{
66405 + if (!len || (num > (PAGE_SIZE / len)))
66406 + return NULL;
66407 +
66408 + return acl_alloc(num * len);
66409 +}
66410 +
66411 +void
66412 +acl_free_all(void)
66413 +{
66414 + if (!current_alloc_state->alloc_stack)
66415 + return;
66416 +
66417 + while (alloc_pop()) ;
66418 +
66419 + if (current_alloc_state->alloc_stack) {
66420 + if ((current_alloc_state->alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
66421 + kfree(current_alloc_state->alloc_stack);
66422 + else
66423 + vfree(current_alloc_state->alloc_stack);
66424 + }
66425 +
66426 + current_alloc_state->alloc_stack = NULL;
66427 + current_alloc_state->alloc_stack_size = 1;
66428 + current_alloc_state->alloc_stack_next = 1;
66429 +
66430 + return;
66431 +}
66432 +
66433 +int
66434 +acl_alloc_stack_init(unsigned long size)
66435 +{
66436 + if ((size * sizeof (void *)) <= PAGE_SIZE)
66437 + current_alloc_state->alloc_stack =
66438 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
66439 + else
66440 + current_alloc_state->alloc_stack = (void **) vmalloc(size * sizeof (void *));
66441 +
66442 + current_alloc_state->alloc_stack_size = size;
66443 + current_alloc_state->alloc_stack_next = 1;
66444 +
66445 + if (!current_alloc_state->alloc_stack)
66446 + return 0;
66447 + else
66448 + return 1;
66449 +}
66450 diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
66451 new file mode 100644
66452 index 0000000..bdd51ea
66453 --- /dev/null
66454 +++ b/grsecurity/gracl_cap.c
66455 @@ -0,0 +1,110 @@
66456 +#include <linux/kernel.h>
66457 +#include <linux/module.h>
66458 +#include <linux/sched.h>
66459 +#include <linux/gracl.h>
66460 +#include <linux/grsecurity.h>
66461 +#include <linux/grinternal.h>
66462 +
66463 +extern const char *captab_log[];
66464 +extern int captab_log_entries;
66465 +
66466 +int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
66467 +{
66468 + struct acl_subject_label *curracl;
66469 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
66470 + kernel_cap_t cap_audit = __cap_empty_set;
66471 +
66472 + if (!gr_acl_is_enabled())
66473 + return 1;
66474 +
66475 + curracl = task->acl;
66476 +
66477 + cap_drop = curracl->cap_lower;
66478 + cap_mask = curracl->cap_mask;
66479 + cap_audit = curracl->cap_invert_audit;
66480 +
66481 + while ((curracl = curracl->parent_subject)) {
66482 + /* if the cap isn't specified in the current computed mask but is specified in the
66483 + current level subject, and is lowered in the current level subject, then add
66484 + it to the set of dropped capabilities
66485 + otherwise, add the current level subject's mask to the current computed mask
66486 + */
66487 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
66488 + cap_raise(cap_mask, cap);
66489 + if (cap_raised(curracl->cap_lower, cap))
66490 + cap_raise(cap_drop, cap);
66491 + if (cap_raised(curracl->cap_invert_audit, cap))
66492 + cap_raise(cap_audit, cap);
66493 + }
66494 + }
66495 +
66496 + if (!cap_raised(cap_drop, cap)) {
66497 + if (cap_raised(cap_audit, cap))
66498 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
66499 + return 1;
66500 + }
66501 +
66502 + curracl = task->acl;
66503 +
66504 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
66505 + && cap_raised(cred->cap_effective, cap)) {
66506 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
66507 + task->role->roletype, GR_GLOBAL_UID(cred->uid),
66508 + GR_GLOBAL_GID(cred->gid), task->exec_file ?
66509 + gr_to_filename(task->exec_file->f_path.dentry,
66510 + task->exec_file->f_path.mnt) : curracl->filename,
66511 + curracl->filename, 0UL,
66512 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
66513 + return 1;
66514 + }
66515 +
66516 + if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
66517 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
66518 +
66519 + return 0;
66520 +}
66521 +
66522 +int
66523 +gr_acl_is_capable(const int cap)
66524 +{
66525 + return gr_task_acl_is_capable(current, current_cred(), cap);
66526 +}
66527 +
66528 +int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
66529 +{
66530 + struct acl_subject_label *curracl;
66531 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
66532 +
66533 + if (!gr_acl_is_enabled())
66534 + return 1;
66535 +
66536 + curracl = task->acl;
66537 +
66538 + cap_drop = curracl->cap_lower;
66539 + cap_mask = curracl->cap_mask;
66540 +
66541 + while ((curracl = curracl->parent_subject)) {
66542 + /* if the cap isn't specified in the current computed mask but is specified in the
66543 + current level subject, and is lowered in the current level subject, then add
66544 + it to the set of dropped capabilities
66545 + otherwise, add the current level subject's mask to the current computed mask
66546 + */
66547 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
66548 + cap_raise(cap_mask, cap);
66549 + if (cap_raised(curracl->cap_lower, cap))
66550 + cap_raise(cap_drop, cap);
66551 + }
66552 + }
66553 +
66554 + if (!cap_raised(cap_drop, cap))
66555 + return 1;
66556 +
66557 + return 0;
66558 +}
66559 +
66560 +int
66561 +gr_acl_is_capable_nolog(const int cap)
66562 +{
66563 + return gr_task_acl_is_capable_nolog(current, cap);
66564 +}
66565 +
66566 diff --git a/grsecurity/gracl_compat.c b/grsecurity/gracl_compat.c
66567 new file mode 100644
66568 index 0000000..ca25605
66569 --- /dev/null
66570 +++ b/grsecurity/gracl_compat.c
66571 @@ -0,0 +1,270 @@
66572 +#include <linux/kernel.h>
66573 +#include <linux/gracl.h>
66574 +#include <linux/compat.h>
66575 +#include <linux/gracl_compat.h>
66576 +
66577 +#include <asm/uaccess.h>
66578 +
66579 +int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap)
66580 +{
66581 + struct gr_arg_wrapper_compat uwrapcompat;
66582 +
66583 + if (copy_from_user(&uwrapcompat, buf, sizeof(uwrapcompat)))
66584 + return -EFAULT;
66585 +
66586 + if (((uwrapcompat.version != GRSECURITY_VERSION) &&
66587 + (uwrapcompat.version != 0x2901)) ||
66588 + (uwrapcompat.size != sizeof(struct gr_arg_compat)))
66589 + return -EINVAL;
66590 +
66591 + uwrap->arg = compat_ptr(uwrapcompat.arg);
66592 + uwrap->version = uwrapcompat.version;
66593 + uwrap->size = sizeof(struct gr_arg);
66594 +
66595 + return 0;
66596 +}
66597 +
66598 +int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg)
66599 +{
66600 + struct gr_arg_compat argcompat;
66601 +
66602 + if (copy_from_user(&argcompat, buf, sizeof(argcompat)))
66603 + return -EFAULT;
66604 +
66605 + arg->role_db.r_table = compat_ptr(argcompat.role_db.r_table);
66606 + arg->role_db.num_pointers = argcompat.role_db.num_pointers;
66607 + arg->role_db.num_roles = argcompat.role_db.num_roles;
66608 + arg->role_db.num_domain_children = argcompat.role_db.num_domain_children;
66609 + arg->role_db.num_subjects = argcompat.role_db.num_subjects;
66610 + arg->role_db.num_objects = argcompat.role_db.num_objects;
66611 +
66612 + memcpy(&arg->pw, &argcompat.pw, sizeof(arg->pw));
66613 + memcpy(&arg->salt, &argcompat.salt, sizeof(arg->salt));
66614 + memcpy(&arg->sum, &argcompat.sum, sizeof(arg->sum));
66615 + memcpy(&arg->sp_role, &argcompat.sp_role, sizeof(arg->sp_role));
66616 + arg->sprole_pws = compat_ptr(argcompat.sprole_pws);
66617 + arg->segv_device = argcompat.segv_device;
66618 + arg->segv_inode = argcompat.segv_inode;
66619 + arg->segv_uid = argcompat.segv_uid;
66620 + arg->num_sprole_pws = argcompat.num_sprole_pws;
66621 + arg->mode = argcompat.mode;
66622 +
66623 + return 0;
66624 +}
66625 +
66626 +int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp)
66627 +{
66628 + struct acl_object_label_compat objcompat;
66629 +
66630 + if (copy_from_user(&objcompat, userp, sizeof(objcompat)))
66631 + return -EFAULT;
66632 +
66633 + obj->filename = compat_ptr(objcompat.filename);
66634 + obj->inode = objcompat.inode;
66635 + obj->device = objcompat.device;
66636 + obj->mode = objcompat.mode;
66637 +
66638 + obj->nested = compat_ptr(objcompat.nested);
66639 + obj->globbed = compat_ptr(objcompat.globbed);
66640 +
66641 + obj->prev = compat_ptr(objcompat.prev);
66642 + obj->next = compat_ptr(objcompat.next);
66643 +
66644 + return 0;
66645 +}
66646 +
66647 +int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp)
66648 +{
66649 + unsigned int i;
66650 + struct acl_subject_label_compat subjcompat;
66651 +
66652 + if (copy_from_user(&subjcompat, userp, sizeof(subjcompat)))
66653 + return -EFAULT;
66654 +
66655 + subj->filename = compat_ptr(subjcompat.filename);
66656 + subj->inode = subjcompat.inode;
66657 + subj->device = subjcompat.device;
66658 + subj->mode = subjcompat.mode;
66659 + subj->cap_mask = subjcompat.cap_mask;
66660 + subj->cap_lower = subjcompat.cap_lower;
66661 + subj->cap_invert_audit = subjcompat.cap_invert_audit;
66662 +
66663 + for (i = 0; i < GR_NLIMITS; i++) {
66664 + if (subjcompat.res[i].rlim_cur == COMPAT_RLIM_INFINITY)
66665 + subj->res[i].rlim_cur = RLIM_INFINITY;
66666 + else
66667 + subj->res[i].rlim_cur = subjcompat.res[i].rlim_cur;
66668 + if (subjcompat.res[i].rlim_max == COMPAT_RLIM_INFINITY)
66669 + subj->res[i].rlim_max = RLIM_INFINITY;
66670 + else
66671 + subj->res[i].rlim_max = subjcompat.res[i].rlim_max;
66672 + }
66673 + subj->resmask = subjcompat.resmask;
66674 +
66675 + subj->user_trans_type = subjcompat.user_trans_type;
66676 + subj->group_trans_type = subjcompat.group_trans_type;
66677 + subj->user_transitions = compat_ptr(subjcompat.user_transitions);
66678 + subj->group_transitions = compat_ptr(subjcompat.group_transitions);
66679 + subj->user_trans_num = subjcompat.user_trans_num;
66680 + subj->group_trans_num = subjcompat.group_trans_num;
66681 +
66682 + memcpy(&subj->sock_families, &subjcompat.sock_families, sizeof(subj->sock_families));
66683 + memcpy(&subj->ip_proto, &subjcompat.ip_proto, sizeof(subj->ip_proto));
66684 + subj->ip_type = subjcompat.ip_type;
66685 + subj->ips = compat_ptr(subjcompat.ips);
66686 + subj->ip_num = subjcompat.ip_num;
66687 + subj->inaddr_any_override = subjcompat.inaddr_any_override;
66688 +
66689 + subj->crashes = subjcompat.crashes;
66690 + subj->expires = subjcompat.expires;
66691 +
66692 + subj->parent_subject = compat_ptr(subjcompat.parent_subject);
66693 + subj->hash = compat_ptr(subjcompat.hash);
66694 + subj->prev = compat_ptr(subjcompat.prev);
66695 + subj->next = compat_ptr(subjcompat.next);
66696 +
66697 + subj->obj_hash = compat_ptr(subjcompat.obj_hash);
66698 + subj->obj_hash_size = subjcompat.obj_hash_size;
66699 + subj->pax_flags = subjcompat.pax_flags;
66700 +
66701 + return 0;
66702 +}
66703 +
66704 +int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp)
66705 +{
66706 + struct acl_role_label_compat rolecompat;
66707 +
66708 + if (copy_from_user(&rolecompat, userp, sizeof(rolecompat)))
66709 + return -EFAULT;
66710 +
66711 + role->rolename = compat_ptr(rolecompat.rolename);
66712 + role->uidgid = rolecompat.uidgid;
66713 + role->roletype = rolecompat.roletype;
66714 +
66715 + role->auth_attempts = rolecompat.auth_attempts;
66716 + role->expires = rolecompat.expires;
66717 +
66718 + role->root_label = compat_ptr(rolecompat.root_label);
66719 + role->hash = compat_ptr(rolecompat.hash);
66720 +
66721 + role->prev = compat_ptr(rolecompat.prev);
66722 + role->next = compat_ptr(rolecompat.next);
66723 +
66724 + role->transitions = compat_ptr(rolecompat.transitions);
66725 + role->allowed_ips = compat_ptr(rolecompat.allowed_ips);
66726 + role->domain_children = compat_ptr(rolecompat.domain_children);
66727 + role->domain_child_num = rolecompat.domain_child_num;
66728 +
66729 + role->umask = rolecompat.umask;
66730 +
66731 + role->subj_hash = compat_ptr(rolecompat.subj_hash);
66732 + role->subj_hash_size = rolecompat.subj_hash_size;
66733 +
66734 + return 0;
66735 +}
66736 +
66737 +int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
66738 +{
66739 + struct role_allowed_ip_compat roleip_compat;
66740 +
66741 + if (copy_from_user(&roleip_compat, userp, sizeof(roleip_compat)))
66742 + return -EFAULT;
66743 +
66744 + roleip->addr = roleip_compat.addr;
66745 + roleip->netmask = roleip_compat.netmask;
66746 +
66747 + roleip->prev = compat_ptr(roleip_compat.prev);
66748 + roleip->next = compat_ptr(roleip_compat.next);
66749 +
66750 + return 0;
66751 +}
66752 +
66753 +int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp)
66754 +{
66755 + struct role_transition_compat trans_compat;
66756 +
66757 + if (copy_from_user(&trans_compat, userp, sizeof(trans_compat)))
66758 + return -EFAULT;
66759 +
66760 + trans->rolename = compat_ptr(trans_compat.rolename);
66761 +
66762 + trans->prev = compat_ptr(trans_compat.prev);
66763 + trans->next = compat_ptr(trans_compat.next);
66764 +
66765 + return 0;
66766 +
66767 +}
66768 +
66769 +int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
66770 +{
66771 + struct gr_hash_struct_compat hash_compat;
66772 +
66773 + if (copy_from_user(&hash_compat, userp, sizeof(hash_compat)))
66774 + return -EFAULT;
66775 +
66776 + hash->table = compat_ptr(hash_compat.table);
66777 + hash->nametable = compat_ptr(hash_compat.nametable);
66778 + hash->first = compat_ptr(hash_compat.first);
66779 +
66780 + hash->table_size = hash_compat.table_size;
66781 + hash->used_size = hash_compat.used_size;
66782 +
66783 + hash->type = hash_compat.type;
66784 +
66785 + return 0;
66786 +}
66787 +
66788 +int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp)
66789 +{
66790 + compat_uptr_t ptrcompat;
66791 +
66792 + if (copy_from_user(&ptrcompat, userp + (idx * sizeof(ptrcompat)), sizeof(ptrcompat)))
66793 + return -EFAULT;
66794 +
66795 + *(void **)ptr = compat_ptr(ptrcompat);
66796 +
66797 + return 0;
66798 +}
66799 +
66800 +int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp)
66801 +{
66802 + struct acl_ip_label_compat ip_compat;
66803 +
66804 + if (copy_from_user(&ip_compat, userp, sizeof(ip_compat)))
66805 + return -EFAULT;
66806 +
66807 + ip->iface = compat_ptr(ip_compat.iface);
66808 + ip->addr = ip_compat.addr;
66809 + ip->netmask = ip_compat.netmask;
66810 + ip->low = ip_compat.low;
66811 + ip->high = ip_compat.high;
66812 + ip->mode = ip_compat.mode;
66813 + ip->type = ip_compat.type;
66814 +
66815 + memcpy(&ip->proto, &ip_compat.proto, sizeof(ip->proto));
66816 +
66817 + ip->prev = compat_ptr(ip_compat.prev);
66818 + ip->next = compat_ptr(ip_compat.next);
66819 +
66820 + return 0;
66821 +}
66822 +
66823 +int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
66824 +{
66825 + struct sprole_pw_compat pw_compat;
66826 +
66827 + if (copy_from_user(&pw_compat, (const void *)userp + (sizeof(pw_compat) * idx), sizeof(pw_compat)))
66828 + return -EFAULT;
66829 +
66830 + pw->rolename = compat_ptr(pw_compat.rolename);
66831 + memcpy(&pw->salt, pw_compat.salt, sizeof(pw->salt));
66832 + memcpy(&pw->sum, pw_compat.sum, sizeof(pw->sum));
66833 +
66834 + return 0;
66835 +}
66836 +
66837 +size_t get_gr_arg_wrapper_size_compat(void)
66838 +{
66839 + return sizeof(struct gr_arg_wrapper_compat);
66840 +}
66841 +
66842 diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
66843 new file mode 100644
66844 index 0000000..deb6f3b
66845 --- /dev/null
66846 +++ b/grsecurity/gracl_fs.c
66847 @@ -0,0 +1,437 @@
66848 +#include <linux/kernel.h>
66849 +#include <linux/sched.h>
66850 +#include <linux/types.h>
66851 +#include <linux/fs.h>
66852 +#include <linux/file.h>
66853 +#include <linux/stat.h>
66854 +#include <linux/grsecurity.h>
66855 +#include <linux/grinternal.h>
66856 +#include <linux/gracl.h>
66857 +
66858 +umode_t
66859 +gr_acl_umask(void)
66860 +{
66861 + if (unlikely(!gr_acl_is_enabled()))
66862 + return 0;
66863 +
66864 + return current->role->umask;
66865 +}
66866 +
66867 +__u32
66868 +gr_acl_handle_hidden_file(const struct dentry * dentry,
66869 + const struct vfsmount * mnt)
66870 +{
66871 + __u32 mode;
66872 +
66873 + if (unlikely(!dentry->d_inode))
66874 + return GR_FIND;
66875 +
66876 + mode =
66877 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
66878 +
66879 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
66880 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
66881 + return mode;
66882 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
66883 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
66884 + return 0;
66885 + } else if (unlikely(!(mode & GR_FIND)))
66886 + return 0;
66887 +
66888 + return GR_FIND;
66889 +}
66890 +
66891 +__u32
66892 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
66893 + int acc_mode)
66894 +{
66895 + __u32 reqmode = GR_FIND;
66896 + __u32 mode;
66897 +
66898 + if (unlikely(!dentry->d_inode))
66899 + return reqmode;
66900 +
66901 + if (acc_mode & MAY_APPEND)
66902 + reqmode |= GR_APPEND;
66903 + else if (acc_mode & MAY_WRITE)
66904 + reqmode |= GR_WRITE;
66905 + if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
66906 + reqmode |= GR_READ;
66907 +
66908 + mode =
66909 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
66910 + mnt);
66911 +
66912 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
66913 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
66914 + reqmode & GR_READ ? " reading" : "",
66915 + reqmode & GR_WRITE ? " writing" : reqmode &
66916 + GR_APPEND ? " appending" : "");
66917 + return reqmode;
66918 + } else
66919 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
66920 + {
66921 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
66922 + reqmode & GR_READ ? " reading" : "",
66923 + reqmode & GR_WRITE ? " writing" : reqmode &
66924 + GR_APPEND ? " appending" : "");
66925 + return 0;
66926 + } else if (unlikely((mode & reqmode) != reqmode))
66927 + return 0;
66928 +
66929 + return reqmode;
66930 +}
66931 +
66932 +__u32
66933 +gr_acl_handle_creat(const struct dentry * dentry,
66934 + const struct dentry * p_dentry,
66935 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
66936 + const int imode)
66937 +{
66938 + __u32 reqmode = GR_WRITE | GR_CREATE;
66939 + __u32 mode;
66940 +
66941 + if (acc_mode & MAY_APPEND)
66942 + reqmode |= GR_APPEND;
66943 + // if a directory was required or the directory already exists, then
66944 + // don't count this open as a read
66945 + if ((acc_mode & MAY_READ) &&
66946 + !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
66947 + reqmode |= GR_READ;
66948 + if ((open_flags & O_CREAT) &&
66949 + ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
66950 + reqmode |= GR_SETID;
66951 +
66952 + mode =
66953 + gr_check_create(dentry, p_dentry, p_mnt,
66954 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
66955 +
66956 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
66957 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
66958 + reqmode & GR_READ ? " reading" : "",
66959 + reqmode & GR_WRITE ? " writing" : reqmode &
66960 + GR_APPEND ? " appending" : "");
66961 + return reqmode;
66962 + } else
66963 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
66964 + {
66965 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
66966 + reqmode & GR_READ ? " reading" : "",
66967 + reqmode & GR_WRITE ? " writing" : reqmode &
66968 + GR_APPEND ? " appending" : "");
66969 + return 0;
66970 + } else if (unlikely((mode & reqmode) != reqmode))
66971 + return 0;
66972 +
66973 + return reqmode;
66974 +}
66975 +
66976 +__u32
66977 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
66978 + const int fmode)
66979 +{
66980 + __u32 mode, reqmode = GR_FIND;
66981 +
66982 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
66983 + reqmode |= GR_EXEC;
66984 + if (fmode & S_IWOTH)
66985 + reqmode |= GR_WRITE;
66986 + if (fmode & S_IROTH)
66987 + reqmode |= GR_READ;
66988 +
66989 + mode =
66990 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
66991 + mnt);
66992 +
66993 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
66994 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
66995 + reqmode & GR_READ ? " reading" : "",
66996 + reqmode & GR_WRITE ? " writing" : "",
66997 + reqmode & GR_EXEC ? " executing" : "");
66998 + return reqmode;
66999 + } else
67000 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
67001 + {
67002 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
67003 + reqmode & GR_READ ? " reading" : "",
67004 + reqmode & GR_WRITE ? " writing" : "",
67005 + reqmode & GR_EXEC ? " executing" : "");
67006 + return 0;
67007 + } else if (unlikely((mode & reqmode) != reqmode))
67008 + return 0;
67009 +
67010 + return reqmode;
67011 +}
67012 +
67013 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
67014 +{
67015 + __u32 mode;
67016 +
67017 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
67018 +
67019 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
67020 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
67021 + return mode;
67022 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
67023 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
67024 + return 0;
67025 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
67026 + return 0;
67027 +
67028 + return (reqmode);
67029 +}
67030 +
67031 +__u32
67032 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
67033 +{
67034 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
67035 +}
67036 +
67037 +__u32
67038 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
67039 +{
67040 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
67041 +}
67042 +
67043 +__u32
67044 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
67045 +{
67046 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
67047 +}
67048 +
67049 +__u32
67050 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
67051 +{
67052 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
67053 +}
67054 +
67055 +__u32
67056 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
67057 + umode_t *modeptr)
67058 +{
67059 + umode_t mode;
67060 +
67061 + *modeptr &= ~gr_acl_umask();
67062 + mode = *modeptr;
67063 +
67064 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
67065 + return 1;
67066 +
67067 + if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
67068 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
67069 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
67070 + GR_CHMOD_ACL_MSG);
67071 + } else {
67072 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
67073 + }
67074 +}
67075 +
67076 +__u32
67077 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
67078 +{
67079 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
67080 +}
67081 +
67082 +__u32
67083 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
67084 +{
67085 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
67086 +}
67087 +
67088 +__u32
67089 +gr_acl_handle_removexattr(const struct dentry *dentry, const struct vfsmount *mnt)
67090 +{
67091 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_REMOVEXATTR_ACL_MSG);
67092 +}
67093 +
67094 +__u32
67095 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
67096 +{
67097 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
67098 +}
67099 +
67100 +__u32
67101 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
67102 +{
67103 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
67104 + GR_UNIXCONNECT_ACL_MSG);
67105 +}
67106 +
67107 +/* hardlinks require at minimum create and link permission,
67108 + any additional privilege required is based on the
67109 + privilege of the file being linked to
67110 +*/
67111 +__u32
67112 +gr_acl_handle_link(const struct dentry * new_dentry,
67113 + const struct dentry * parent_dentry,
67114 + const struct vfsmount * parent_mnt,
67115 + const struct dentry * old_dentry,
67116 + const struct vfsmount * old_mnt, const struct filename *to)
67117 +{
67118 + __u32 mode;
67119 + __u32 needmode = GR_CREATE | GR_LINK;
67120 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
67121 +
67122 + mode =
67123 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
67124 + old_mnt);
67125 +
67126 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
67127 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
67128 + return mode;
67129 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
67130 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
67131 + return 0;
67132 + } else if (unlikely((mode & needmode) != needmode))
67133 + return 0;
67134 +
67135 + return 1;
67136 +}
67137 +
67138 +__u32
67139 +gr_acl_handle_symlink(const struct dentry * new_dentry,
67140 + const struct dentry * parent_dentry,
67141 + const struct vfsmount * parent_mnt, const struct filename *from)
67142 +{
67143 + __u32 needmode = GR_WRITE | GR_CREATE;
67144 + __u32 mode;
67145 +
67146 + mode =
67147 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
67148 + GR_CREATE | GR_AUDIT_CREATE |
67149 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
67150 +
67151 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
67152 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
67153 + return mode;
67154 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
67155 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
67156 + return 0;
67157 + } else if (unlikely((mode & needmode) != needmode))
67158 + return 0;
67159 +
67160 + return (GR_WRITE | GR_CREATE);
67161 +}
67162 +
67163 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
67164 +{
67165 + __u32 mode;
67166 +
67167 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
67168 +
67169 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
67170 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
67171 + return mode;
67172 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
67173 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
67174 + return 0;
67175 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
67176 + return 0;
67177 +
67178 + return (reqmode);
67179 +}
67180 +
67181 +__u32
67182 +gr_acl_handle_mknod(const struct dentry * new_dentry,
67183 + const struct dentry * parent_dentry,
67184 + const struct vfsmount * parent_mnt,
67185 + const int mode)
67186 +{
67187 + __u32 reqmode = GR_WRITE | GR_CREATE;
67188 + if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
67189 + reqmode |= GR_SETID;
67190 +
67191 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
67192 + reqmode, GR_MKNOD_ACL_MSG);
67193 +}
67194 +
67195 +__u32
67196 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
67197 + const struct dentry *parent_dentry,
67198 + const struct vfsmount *parent_mnt)
67199 +{
67200 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
67201 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
67202 +}
67203 +
67204 +#define RENAME_CHECK_SUCCESS(old, new) \
67205 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
67206 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
67207 +
67208 +int
67209 +gr_acl_handle_rename(struct dentry *new_dentry,
67210 + struct dentry *parent_dentry,
67211 + const struct vfsmount *parent_mnt,
67212 + struct dentry *old_dentry,
67213 + struct inode *old_parent_inode,
67214 + struct vfsmount *old_mnt, const struct filename *newname)
67215 +{
67216 + __u32 comp1, comp2;
67217 + int error = 0;
67218 +
67219 + if (unlikely(!gr_acl_is_enabled()))
67220 + return 0;
67221 +
67222 + if (!new_dentry->d_inode) {
67223 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
67224 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
67225 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
67226 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
67227 + GR_DELETE | GR_AUDIT_DELETE |
67228 + GR_AUDIT_READ | GR_AUDIT_WRITE |
67229 + GR_SUPPRESS, old_mnt);
67230 + } else {
67231 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
67232 + GR_CREATE | GR_DELETE |
67233 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
67234 + GR_AUDIT_READ | GR_AUDIT_WRITE |
67235 + GR_SUPPRESS, parent_mnt);
67236 + comp2 =
67237 + gr_search_file(old_dentry,
67238 + GR_READ | GR_WRITE | GR_AUDIT_READ |
67239 + GR_DELETE | GR_AUDIT_DELETE |
67240 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
67241 + }
67242 +
67243 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
67244 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
67245 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
67246 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
67247 + && !(comp2 & GR_SUPPRESS)) {
67248 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
67249 + error = -EACCES;
67250 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
67251 + error = -EACCES;
67252 +
67253 + return error;
67254 +}
67255 +
67256 +void
67257 +gr_acl_handle_exit(void)
67258 +{
67259 + u16 id;
67260 + char *rolename;
67261 +
67262 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
67263 + !(current->role->roletype & GR_ROLE_PERSIST))) {
67264 + id = current->acl_role_id;
67265 + rolename = current->role->rolename;
67266 + gr_set_acls(1);
67267 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
67268 + }
67269 +
67270 + gr_put_exec_file(current);
67271 + return;
67272 +}
67273 +
67274 +int
67275 +gr_acl_handle_procpidmem(const struct task_struct *task)
67276 +{
67277 + if (unlikely(!gr_acl_is_enabled()))
67278 + return 0;
67279 +
67280 + if (task != current && task->acl->mode & GR_PROTPROCFD)
67281 + return -EACCES;
67282 +
67283 + return 0;
67284 +}
67285 diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
67286 new file mode 100644
67287 index 0000000..f056b81
67288 --- /dev/null
67289 +++ b/grsecurity/gracl_ip.c
67290 @@ -0,0 +1,386 @@
67291 +#include <linux/kernel.h>
67292 +#include <asm/uaccess.h>
67293 +#include <asm/errno.h>
67294 +#include <net/sock.h>
67295 +#include <linux/file.h>
67296 +#include <linux/fs.h>
67297 +#include <linux/net.h>
67298 +#include <linux/in.h>
67299 +#include <linux/skbuff.h>
67300 +#include <linux/ip.h>
67301 +#include <linux/udp.h>
67302 +#include <linux/types.h>
67303 +#include <linux/sched.h>
67304 +#include <linux/netdevice.h>
67305 +#include <linux/inetdevice.h>
67306 +#include <linux/gracl.h>
67307 +#include <linux/grsecurity.h>
67308 +#include <linux/grinternal.h>
67309 +
67310 +#define GR_BIND 0x01
67311 +#define GR_CONNECT 0x02
67312 +#define GR_INVERT 0x04
67313 +#define GR_BINDOVERRIDE 0x08
67314 +#define GR_CONNECTOVERRIDE 0x10
67315 +#define GR_SOCK_FAMILY 0x20
67316 +
67317 +static const char * gr_protocols[IPPROTO_MAX] = {
67318 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
67319 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
67320 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
67321 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
67322 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
67323 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
67324 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
67325 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
67326 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
67327 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
67328 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
67329 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
67330 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
67331 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
67332 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
67333 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
67334 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
67335 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
67336 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
67337 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
67338 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
67339 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
67340 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
67341 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
67342 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
67343 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
67344 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
67345 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
67346 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
67347 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
67348 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
67349 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
67350 + };
67351 +
67352 +static const char * gr_socktypes[SOCK_MAX] = {
67353 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
67354 + "unknown:7", "unknown:8", "unknown:9", "packet"
67355 + };
67356 +
67357 +static const char * gr_sockfamilies[AF_MAX+1] = {
67358 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
67359 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
67360 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
67361 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
67362 + };
67363 +
67364 +const char *
67365 +gr_proto_to_name(unsigned char proto)
67366 +{
67367 + return gr_protocols[proto];
67368 +}
67369 +
67370 +const char *
67371 +gr_socktype_to_name(unsigned char type)
67372 +{
67373 + return gr_socktypes[type];
67374 +}
67375 +
67376 +const char *
67377 +gr_sockfamily_to_name(unsigned char family)
67378 +{
67379 + return gr_sockfamilies[family];
67380 +}
67381 +
67382 +extern const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
67383 +
67384 +int
67385 +gr_search_socket(const int domain, const int type, const int protocol)
67386 +{
67387 + struct acl_subject_label *curr;
67388 + const struct cred *cred = current_cred();
67389 +
67390 + if (unlikely(!gr_acl_is_enabled()))
67391 + goto exit;
67392 +
67393 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
67394 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
67395 + goto exit; // let the kernel handle it
67396 +
67397 + curr = current->acl;
67398 +
67399 + if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
67400 + /* the family is allowed, if this is PF_INET allow it only if
67401 + the extra sock type/protocol checks pass */
67402 + if (domain == PF_INET)
67403 + goto inet_check;
67404 + goto exit;
67405 + } else {
67406 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
67407 + __u32 fakeip = 0;
67408 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67409 + current->role->roletype, GR_GLOBAL_UID(cred->uid),
67410 + GR_GLOBAL_GID(cred->gid), current->exec_file ?
67411 + gr_to_filename(current->exec_file->f_path.dentry,
67412 + current->exec_file->f_path.mnt) :
67413 + curr->filename, curr->filename,
67414 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
67415 + &current->signal->saved_ip);
67416 + goto exit;
67417 + }
67418 + goto exit_fail;
67419 + }
67420 +
67421 +inet_check:
67422 + /* the rest of this checking is for IPv4 only */
67423 + if (!curr->ips)
67424 + goto exit;
67425 +
67426 + if ((curr->ip_type & (1U << type)) &&
67427 + (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
67428 + goto exit;
67429 +
67430 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
67431 + /* we don't place acls on raw sockets , and sometimes
67432 + dgram/ip sockets are opened for ioctl and not
67433 + bind/connect, so we'll fake a bind learn log */
67434 + if (type == SOCK_RAW || type == SOCK_PACKET) {
67435 + __u32 fakeip = 0;
67436 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67437 + current->role->roletype, GR_GLOBAL_UID(cred->uid),
67438 + GR_GLOBAL_GID(cred->gid), current->exec_file ?
67439 + gr_to_filename(current->exec_file->f_path.dentry,
67440 + current->exec_file->f_path.mnt) :
67441 + curr->filename, curr->filename,
67442 + &fakeip, 0, type,
67443 + protocol, GR_CONNECT, &current->signal->saved_ip);
67444 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
67445 + __u32 fakeip = 0;
67446 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67447 + current->role->roletype, GR_GLOBAL_UID(cred->uid),
67448 + GR_GLOBAL_GID(cred->gid), current->exec_file ?
67449 + gr_to_filename(current->exec_file->f_path.dentry,
67450 + current->exec_file->f_path.mnt) :
67451 + curr->filename, curr->filename,
67452 + &fakeip, 0, type,
67453 + protocol, GR_BIND, &current->signal->saved_ip);
67454 + }
67455 + /* we'll log when they use connect or bind */
67456 + goto exit;
67457 + }
67458 +
67459 +exit_fail:
67460 + if (domain == PF_INET)
67461 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
67462 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
67463 + else if (rcu_access_pointer(net_families[domain]) != NULL)
67464 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
67465 + gr_socktype_to_name(type), protocol);
67466 +
67467 + return 0;
67468 +exit:
67469 + return 1;
67470 +}
67471 +
67472 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
67473 +{
67474 + if ((ip->mode & mode) &&
67475 + (ip_port >= ip->low) &&
67476 + (ip_port <= ip->high) &&
67477 + ((ntohl(ip_addr) & our_netmask) ==
67478 + (ntohl(our_addr) & our_netmask))
67479 + && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
67480 + && (ip->type & (1U << type))) {
67481 + if (ip->mode & GR_INVERT)
67482 + return 2; // specifically denied
67483 + else
67484 + return 1; // allowed
67485 + }
67486 +
67487 + return 0; // not specifically allowed, may continue parsing
67488 +}
67489 +
67490 +static int
67491 +gr_search_connectbind(const int full_mode, struct sock *sk,
67492 + struct sockaddr_in *addr, const int type)
67493 +{
67494 + char iface[IFNAMSIZ] = {0};
67495 + struct acl_subject_label *curr;
67496 + struct acl_ip_label *ip;
67497 + struct inet_sock *isk;
67498 + struct net_device *dev;
67499 + struct in_device *idev;
67500 + unsigned long i;
67501 + int ret;
67502 + int mode = full_mode & (GR_BIND | GR_CONNECT);
67503 + __u32 ip_addr = 0;
67504 + __u32 our_addr;
67505 + __u32 our_netmask;
67506 + char *p;
67507 + __u16 ip_port = 0;
67508 + const struct cred *cred = current_cred();
67509 +
67510 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
67511 + return 0;
67512 +
67513 + curr = current->acl;
67514 + isk = inet_sk(sk);
67515 +
67516 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
67517 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
67518 + addr->sin_addr.s_addr = curr->inaddr_any_override;
67519 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
67520 + struct sockaddr_in saddr;
67521 + int err;
67522 +
67523 + saddr.sin_family = AF_INET;
67524 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
67525 + saddr.sin_port = isk->inet_sport;
67526 +
67527 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
67528 + if (err)
67529 + return err;
67530 +
67531 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
67532 + if (err)
67533 + return err;
67534 + }
67535 +
67536 + if (!curr->ips)
67537 + return 0;
67538 +
67539 + ip_addr = addr->sin_addr.s_addr;
67540 + ip_port = ntohs(addr->sin_port);
67541 +
67542 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
67543 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
67544 + current->role->roletype, GR_GLOBAL_UID(cred->uid),
67545 + GR_GLOBAL_GID(cred->gid), current->exec_file ?
67546 + gr_to_filename(current->exec_file->f_path.dentry,
67547 + current->exec_file->f_path.mnt) :
67548 + curr->filename, curr->filename,
67549 + &ip_addr, ip_port, type,
67550 + sk->sk_protocol, mode, &current->signal->saved_ip);
67551 + return 0;
67552 + }
67553 +
67554 + for (i = 0; i < curr->ip_num; i++) {
67555 + ip = *(curr->ips + i);
67556 + if (ip->iface != NULL) {
67557 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
67558 + p = strchr(iface, ':');
67559 + if (p != NULL)
67560 + *p = '\0';
67561 + dev = dev_get_by_name(sock_net(sk), iface);
67562 + if (dev == NULL)
67563 + continue;
67564 + idev = in_dev_get(dev);
67565 + if (idev == NULL) {
67566 + dev_put(dev);
67567 + continue;
67568 + }
67569 + rcu_read_lock();
67570 + for_ifa(idev) {
67571 + if (!strcmp(ip->iface, ifa->ifa_label)) {
67572 + our_addr = ifa->ifa_address;
67573 + our_netmask = 0xffffffff;
67574 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
67575 + if (ret == 1) {
67576 + rcu_read_unlock();
67577 + in_dev_put(idev);
67578 + dev_put(dev);
67579 + return 0;
67580 + } else if (ret == 2) {
67581 + rcu_read_unlock();
67582 + in_dev_put(idev);
67583 + dev_put(dev);
67584 + goto denied;
67585 + }
67586 + }
67587 + } endfor_ifa(idev);
67588 + rcu_read_unlock();
67589 + in_dev_put(idev);
67590 + dev_put(dev);
67591 + } else {
67592 + our_addr = ip->addr;
67593 + our_netmask = ip->netmask;
67594 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
67595 + if (ret == 1)
67596 + return 0;
67597 + else if (ret == 2)
67598 + goto denied;
67599 + }
67600 + }
67601 +
67602 +denied:
67603 + if (mode == GR_BIND)
67604 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
67605 + else if (mode == GR_CONNECT)
67606 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
67607 +
67608 + return -EACCES;
67609 +}
67610 +
67611 +int
67612 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
67613 +{
67614 + /* always allow disconnection of dgram sockets with connect */
67615 + if (addr->sin_family == AF_UNSPEC)
67616 + return 0;
67617 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
67618 +}
67619 +
67620 +int
67621 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
67622 +{
67623 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
67624 +}
67625 +
67626 +int gr_search_listen(struct socket *sock)
67627 +{
67628 + struct sock *sk = sock->sk;
67629 + struct sockaddr_in addr;
67630 +
67631 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
67632 + addr.sin_port = inet_sk(sk)->inet_sport;
67633 +
67634 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
67635 +}
67636 +
67637 +int gr_search_accept(struct socket *sock)
67638 +{
67639 + struct sock *sk = sock->sk;
67640 + struct sockaddr_in addr;
67641 +
67642 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
67643 + addr.sin_port = inet_sk(sk)->inet_sport;
67644 +
67645 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
67646 +}
67647 +
67648 +int
67649 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
67650 +{
67651 + if (addr)
67652 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
67653 + else {
67654 + struct sockaddr_in sin;
67655 + const struct inet_sock *inet = inet_sk(sk);
67656 +
67657 + sin.sin_addr.s_addr = inet->inet_daddr;
67658 + sin.sin_port = inet->inet_dport;
67659 +
67660 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
67661 + }
67662 +}
67663 +
67664 +int
67665 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
67666 +{
67667 + struct sockaddr_in sin;
67668 +
67669 + if (unlikely(skb->len < sizeof (struct udphdr)))
67670 + return 0; // skip this packet
67671 +
67672 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
67673 + sin.sin_port = udp_hdr(skb)->source;
67674 +
67675 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
67676 +}
67677 diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
67678 new file mode 100644
67679 index 0000000..25f54ef
67680 --- /dev/null
67681 +++ b/grsecurity/gracl_learn.c
67682 @@ -0,0 +1,207 @@
67683 +#include <linux/kernel.h>
67684 +#include <linux/mm.h>
67685 +#include <linux/sched.h>
67686 +#include <linux/poll.h>
67687 +#include <linux/string.h>
67688 +#include <linux/file.h>
67689 +#include <linux/types.h>
67690 +#include <linux/vmalloc.h>
67691 +#include <linux/grinternal.h>
67692 +
67693 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
67694 + size_t count, loff_t *ppos);
67695 +extern int gr_acl_is_enabled(void);
67696 +
67697 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
67698 +static int gr_learn_attached;
67699 +
67700 +/* use a 512k buffer */
67701 +#define LEARN_BUFFER_SIZE (512 * 1024)
67702 +
67703 +static DEFINE_SPINLOCK(gr_learn_lock);
67704 +static DEFINE_MUTEX(gr_learn_user_mutex);
67705 +
67706 +/* we need to maintain two buffers, so that the kernel context of grlearn
67707 + uses a semaphore around the userspace copying, and the other kernel contexts
67708 + use a spinlock when copying into the buffer, since they cannot sleep
67709 +*/
67710 +static char *learn_buffer;
67711 +static char *learn_buffer_user;
67712 +static int learn_buffer_len;
67713 +static int learn_buffer_user_len;
67714 +
67715 +static ssize_t
67716 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
67717 +{
67718 + DECLARE_WAITQUEUE(wait, current);
67719 + ssize_t retval = 0;
67720 +
67721 + add_wait_queue(&learn_wait, &wait);
67722 + set_current_state(TASK_INTERRUPTIBLE);
67723 + do {
67724 + mutex_lock(&gr_learn_user_mutex);
67725 + spin_lock(&gr_learn_lock);
67726 + if (learn_buffer_len)
67727 + break;
67728 + spin_unlock(&gr_learn_lock);
67729 + mutex_unlock(&gr_learn_user_mutex);
67730 + if (file->f_flags & O_NONBLOCK) {
67731 + retval = -EAGAIN;
67732 + goto out;
67733 + }
67734 + if (signal_pending(current)) {
67735 + retval = -ERESTARTSYS;
67736 + goto out;
67737 + }
67738 +
67739 + schedule();
67740 + } while (1);
67741 +
67742 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
67743 + learn_buffer_user_len = learn_buffer_len;
67744 + retval = learn_buffer_len;
67745 + learn_buffer_len = 0;
67746 +
67747 + spin_unlock(&gr_learn_lock);
67748 +
67749 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
67750 + retval = -EFAULT;
67751 +
67752 + mutex_unlock(&gr_learn_user_mutex);
67753 +out:
67754 + set_current_state(TASK_RUNNING);
67755 + remove_wait_queue(&learn_wait, &wait);
67756 + return retval;
67757 +}
67758 +
67759 +static unsigned int
67760 +poll_learn(struct file * file, poll_table * wait)
67761 +{
67762 + poll_wait(file, &learn_wait, wait);
67763 +
67764 + if (learn_buffer_len)
67765 + return (POLLIN | POLLRDNORM);
67766 +
67767 + return 0;
67768 +}
67769 +
67770 +void
67771 +gr_clear_learn_entries(void)
67772 +{
67773 + char *tmp;
67774 +
67775 + mutex_lock(&gr_learn_user_mutex);
67776 + spin_lock(&gr_learn_lock);
67777 + tmp = learn_buffer;
67778 + learn_buffer = NULL;
67779 + spin_unlock(&gr_learn_lock);
67780 + if (tmp)
67781 + vfree(tmp);
67782 + if (learn_buffer_user != NULL) {
67783 + vfree(learn_buffer_user);
67784 + learn_buffer_user = NULL;
67785 + }
67786 + learn_buffer_len = 0;
67787 + mutex_unlock(&gr_learn_user_mutex);
67788 +
67789 + return;
67790 +}
67791 +
67792 +void
67793 +gr_add_learn_entry(const char *fmt, ...)
67794 +{
67795 + va_list args;
67796 + unsigned int len;
67797 +
67798 + if (!gr_learn_attached)
67799 + return;
67800 +
67801 + spin_lock(&gr_learn_lock);
67802 +
67803 + /* leave a gap at the end so we know when it's "full" but don't have to
67804 + compute the exact length of the string we're trying to append
67805 + */
67806 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
67807 + spin_unlock(&gr_learn_lock);
67808 + wake_up_interruptible(&learn_wait);
67809 + return;
67810 + }
67811 + if (learn_buffer == NULL) {
67812 + spin_unlock(&gr_learn_lock);
67813 + return;
67814 + }
67815 +
67816 + va_start(args, fmt);
67817 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
67818 + va_end(args);
67819 +
67820 + learn_buffer_len += len + 1;
67821 +
67822 + spin_unlock(&gr_learn_lock);
67823 + wake_up_interruptible(&learn_wait);
67824 +
67825 + return;
67826 +}
67827 +
67828 +static int
67829 +open_learn(struct inode *inode, struct file *file)
67830 +{
67831 + if (file->f_mode & FMODE_READ && gr_learn_attached)
67832 + return -EBUSY;
67833 + if (file->f_mode & FMODE_READ) {
67834 + int retval = 0;
67835 + mutex_lock(&gr_learn_user_mutex);
67836 + if (learn_buffer == NULL)
67837 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
67838 + if (learn_buffer_user == NULL)
67839 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
67840 + if (learn_buffer == NULL) {
67841 + retval = -ENOMEM;
67842 + goto out_error;
67843 + }
67844 + if (learn_buffer_user == NULL) {
67845 + retval = -ENOMEM;
67846 + goto out_error;
67847 + }
67848 + learn_buffer_len = 0;
67849 + learn_buffer_user_len = 0;
67850 + gr_learn_attached = 1;
67851 +out_error:
67852 + mutex_unlock(&gr_learn_user_mutex);
67853 + return retval;
67854 + }
67855 + return 0;
67856 +}
67857 +
67858 +static int
67859 +close_learn(struct inode *inode, struct file *file)
67860 +{
67861 + if (file->f_mode & FMODE_READ) {
67862 + char *tmp = NULL;
67863 + mutex_lock(&gr_learn_user_mutex);
67864 + spin_lock(&gr_learn_lock);
67865 + tmp = learn_buffer;
67866 + learn_buffer = NULL;
67867 + spin_unlock(&gr_learn_lock);
67868 + if (tmp)
67869 + vfree(tmp);
67870 + if (learn_buffer_user != NULL) {
67871 + vfree(learn_buffer_user);
67872 + learn_buffer_user = NULL;
67873 + }
67874 + learn_buffer_len = 0;
67875 + learn_buffer_user_len = 0;
67876 + gr_learn_attached = 0;
67877 + mutex_unlock(&gr_learn_user_mutex);
67878 + }
67879 +
67880 + return 0;
67881 +}
67882 +
67883 +const struct file_operations grsec_fops = {
67884 + .read = read_learn,
67885 + .write = write_grsec_handler,
67886 + .open = open_learn,
67887 + .release = close_learn,
67888 + .poll = poll_learn,
67889 +};
67890 diff --git a/grsecurity/gracl_policy.c b/grsecurity/gracl_policy.c
67891 new file mode 100644
67892 index 0000000..361a099
67893 --- /dev/null
67894 +++ b/grsecurity/gracl_policy.c
67895 @@ -0,0 +1,1782 @@
67896 +#include <linux/kernel.h>
67897 +#include <linux/module.h>
67898 +#include <linux/sched.h>
67899 +#include <linux/mm.h>
67900 +#include <linux/file.h>
67901 +#include <linux/fs.h>
67902 +#include <linux/namei.h>
67903 +#include <linux/mount.h>
67904 +#include <linux/tty.h>
67905 +#include <linux/proc_fs.h>
67906 +#include <linux/lglock.h>
67907 +#include <linux/slab.h>
67908 +#include <linux/vmalloc.h>
67909 +#include <linux/types.h>
67910 +#include <linux/sysctl.h>
67911 +#include <linux/netdevice.h>
67912 +#include <linux/ptrace.h>
67913 +#include <linux/gracl.h>
67914 +#include <linux/gralloc.h>
67915 +#include <linux/security.h>
67916 +#include <linux/grinternal.h>
67917 +#include <linux/pid_namespace.h>
67918 +#include <linux/stop_machine.h>
67919 +#include <linux/fdtable.h>
67920 +#include <linux/percpu.h>
67921 +#include <linux/lglock.h>
67922 +#include <linux/hugetlb.h>
67923 +#include <linux/posix-timers.h>
67924 +#include "../fs/mount.h"
67925 +
67926 +#include <asm/uaccess.h>
67927 +#include <asm/errno.h>
67928 +#include <asm/mman.h>
67929 +
67930 +extern struct gr_policy_state *polstate;
67931 +
67932 +#define FOR_EACH_ROLE_START(role) \
67933 + role = polstate->role_list; \
67934 + while (role) {
67935 +
67936 +#define FOR_EACH_ROLE_END(role) \
67937 + role = role->prev; \
67938 + }
67939 +
67940 +struct path gr_real_root;
67941 +
67942 +extern struct gr_alloc_state *current_alloc_state;
67943 +
67944 +u16 acl_sp_role_value;
67945 +
67946 +static DEFINE_MUTEX(gr_dev_mutex);
67947 +
67948 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
67949 +extern void gr_clear_learn_entries(void);
67950 +
67951 +static struct gr_arg gr_usermode;
67952 +static unsigned char gr_system_salt[GR_SALT_LEN];
67953 +static unsigned char gr_system_sum[GR_SHA_LEN];
67954 +
67955 +static unsigned int gr_auth_attempts = 0;
67956 +static unsigned long gr_auth_expires = 0UL;
67957 +
67958 +struct acl_object_label *fakefs_obj_rw;
67959 +struct acl_object_label *fakefs_obj_rwx;
67960 +
67961 +extern int gr_init_uidset(void);
67962 +extern void gr_free_uidset(void);
67963 +extern void gr_remove_uid(uid_t uid);
67964 +extern int gr_find_uid(uid_t uid);
67965 +
67966 +extern struct acl_subject_label *__gr_get_subject_for_task(const struct gr_policy_state *state, struct task_struct *task, const char *filename);
67967 +extern void __gr_apply_subject_to_task(struct gr_policy_state *state, struct task_struct *task, struct acl_subject_label *subj);
67968 +extern int gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb);
67969 +extern void __insert_inodev_entry(const struct gr_policy_state *state, struct inodev_entry *entry);
67970 +extern struct acl_role_label *__lookup_acl_role_label(const struct gr_policy_state *state, const struct task_struct *task, const uid_t uid, const gid_t gid);
67971 +extern void insert_acl_obj_label(struct acl_object_label *obj, struct acl_subject_label *subj);
67972 +extern void insert_acl_subj_label(struct acl_subject_label *obj, struct acl_role_label *role);
67973 +extern struct name_entry * __lookup_name_entry(const struct gr_policy_state *state, const char *name);
67974 +extern char *gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt);
67975 +extern struct acl_subject_label *lookup_acl_subj_label(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
67976 +extern struct acl_subject_label *lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, const struct acl_role_label *role);
67977 +extern void assign_special_role(const char *rolename);
67978 +extern struct acl_subject_label *chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, const struct acl_role_label *role);
67979 +extern int gr_rbac_disable(void *unused);
67980 +extern void gr_enable_rbac_system(void);
67981 +
67982 +static int copy_acl_object_label_normal(struct acl_object_label *obj, const struct acl_object_label *userp)
67983 +{
67984 + if (copy_from_user(obj, userp, sizeof(struct acl_object_label)))
67985 + return -EFAULT;
67986 +
67987 + return 0;
67988 +}
67989 +
67990 +static int copy_acl_ip_label_normal(struct acl_ip_label *ip, const struct acl_ip_label *userp)
67991 +{
67992 + if (copy_from_user(ip, userp, sizeof(struct acl_ip_label)))
67993 + return -EFAULT;
67994 +
67995 + return 0;
67996 +}
67997 +
67998 +static int copy_acl_subject_label_normal(struct acl_subject_label *subj, const struct acl_subject_label *userp)
67999 +{
68000 + if (copy_from_user(subj, userp, sizeof(struct acl_subject_label)))
68001 + return -EFAULT;
68002 +
68003 + return 0;
68004 +}
68005 +
68006 +static int copy_acl_role_label_normal(struct acl_role_label *role, const struct acl_role_label *userp)
68007 +{
68008 + if (copy_from_user(role, userp, sizeof(struct acl_role_label)))
68009 + return -EFAULT;
68010 +
68011 + return 0;
68012 +}
68013 +
68014 +static int copy_role_allowed_ip_normal(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp)
68015 +{
68016 + if (copy_from_user(roleip, userp, sizeof(struct role_allowed_ip)))
68017 + return -EFAULT;
68018 +
68019 + return 0;
68020 +}
68021 +
68022 +static int copy_sprole_pw_normal(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp)
68023 +{
68024 + if (copy_from_user(pw, userp + idx, sizeof(struct sprole_pw)))
68025 + return -EFAULT;
68026 +
68027 + return 0;
68028 +}
68029 +
68030 +static int copy_gr_hash_struct_normal(struct gr_hash_struct *hash, const struct gr_hash_struct *userp)
68031 +{
68032 + if (copy_from_user(hash, userp, sizeof(struct gr_hash_struct)))
68033 + return -EFAULT;
68034 +
68035 + return 0;
68036 +}
68037 +
68038 +static int copy_role_transition_normal(struct role_transition *trans, const struct role_transition *userp)
68039 +{
68040 + if (copy_from_user(trans, userp, sizeof(struct role_transition)))
68041 + return -EFAULT;
68042 +
68043 + return 0;
68044 +}
68045 +
68046 +int copy_pointer_from_array_normal(void *ptr, unsigned long idx, const void *userp)
68047 +{
68048 + if (copy_from_user(ptr, userp + (idx * sizeof(void *)), sizeof(void *)))
68049 + return -EFAULT;
68050 +
68051 + return 0;
68052 +}
68053 +
68054 +static int copy_gr_arg_wrapper_normal(const char __user *buf, struct gr_arg_wrapper *uwrap)
68055 +{
68056 + if (copy_from_user(uwrap, buf, sizeof (struct gr_arg_wrapper)))
68057 + return -EFAULT;
68058 +
68059 + if (((uwrap->version != GRSECURITY_VERSION) &&
68060 + (uwrap->version != 0x2901)) ||
68061 + (uwrap->size != sizeof(struct gr_arg)))
68062 + return -EINVAL;
68063 +
68064 + return 0;
68065 +}
68066 +
68067 +static int copy_gr_arg_normal(const struct gr_arg __user *buf, struct gr_arg *arg)
68068 +{
68069 + if (copy_from_user(arg, buf, sizeof (struct gr_arg)))
68070 + return -EFAULT;
68071 +
68072 + return 0;
68073 +}
68074 +
68075 +static size_t get_gr_arg_wrapper_size_normal(void)
68076 +{
68077 + return sizeof(struct gr_arg_wrapper);
68078 +}
68079 +
68080 +#ifdef CONFIG_COMPAT
68081 +extern int copy_gr_arg_wrapper_compat(const char *buf, struct gr_arg_wrapper *uwrap);
68082 +extern int copy_gr_arg_compat(const struct gr_arg __user *buf, struct gr_arg *arg);
68083 +extern int copy_acl_object_label_compat(struct acl_object_label *obj, const struct acl_object_label *userp);
68084 +extern int copy_acl_subject_label_compat(struct acl_subject_label *subj, const struct acl_subject_label *userp);
68085 +extern int copy_acl_role_label_compat(struct acl_role_label *role, const struct acl_role_label *userp);
68086 +extern int copy_role_allowed_ip_compat(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp);
68087 +extern int copy_role_transition_compat(struct role_transition *trans, const struct role_transition *userp);
68088 +extern int copy_gr_hash_struct_compat(struct gr_hash_struct *hash, const struct gr_hash_struct *userp);
68089 +extern int copy_pointer_from_array_compat(void *ptr, unsigned long idx, const void *userp);
68090 +extern int copy_acl_ip_label_compat(struct acl_ip_label *ip, const struct acl_ip_label *userp);
68091 +extern int copy_sprole_pw_compat(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp);
68092 +extern size_t get_gr_arg_wrapper_size_compat(void);
68093 +
68094 +int (* copy_gr_arg_wrapper)(const char *buf, struct gr_arg_wrapper *uwrap) __read_only;
68095 +int (* copy_gr_arg)(const struct gr_arg *buf, struct gr_arg *arg) __read_only;
68096 +int (* copy_acl_object_label)(struct acl_object_label *obj, const struct acl_object_label *userp) __read_only;
68097 +int (* copy_acl_subject_label)(struct acl_subject_label *subj, const struct acl_subject_label *userp) __read_only;
68098 +int (* copy_acl_role_label)(struct acl_role_label *role, const struct acl_role_label *userp) __read_only;
68099 +int (* copy_acl_ip_label)(struct acl_ip_label *ip, const struct acl_ip_label *userp) __read_only;
68100 +int (* copy_pointer_from_array)(void *ptr, unsigned long idx, const void *userp) __read_only;
68101 +int (* copy_sprole_pw)(struct sprole_pw *pw, unsigned long idx, const struct sprole_pw *userp) __read_only;
68102 +int (* copy_gr_hash_struct)(struct gr_hash_struct *hash, const struct gr_hash_struct *userp) __read_only;
68103 +int (* copy_role_transition)(struct role_transition *trans, const struct role_transition *userp) __read_only;
68104 +int (* copy_role_allowed_ip)(struct role_allowed_ip *roleip, const struct role_allowed_ip *userp) __read_only;
68105 +size_t (* get_gr_arg_wrapper_size)(void) __read_only;
68106 +
68107 +#else
68108 +#define copy_gr_arg_wrapper copy_gr_arg_wrapper_normal
68109 +#define copy_gr_arg copy_gr_arg_normal
68110 +#define copy_gr_hash_struct copy_gr_hash_struct_normal
68111 +#define copy_acl_object_label copy_acl_object_label_normal
68112 +#define copy_acl_subject_label copy_acl_subject_label_normal
68113 +#define copy_acl_role_label copy_acl_role_label_normal
68114 +#define copy_acl_ip_label copy_acl_ip_label_normal
68115 +#define copy_pointer_from_array copy_pointer_from_array_normal
68116 +#define copy_sprole_pw copy_sprole_pw_normal
68117 +#define copy_role_transition copy_role_transition_normal
68118 +#define copy_role_allowed_ip copy_role_allowed_ip_normal
68119 +#define get_gr_arg_wrapper_size get_gr_arg_wrapper_size_normal
68120 +#endif
68121 +
68122 +static struct acl_subject_label *
68123 +lookup_subject_map(const struct acl_subject_label *userp)
68124 +{
68125 + unsigned int index = gr_shash(userp, polstate->subj_map_set.s_size);
68126 + struct subject_map *match;
68127 +
68128 + match = polstate->subj_map_set.s_hash[index];
68129 +
68130 + while (match && match->user != userp)
68131 + match = match->next;
68132 +
68133 + if (match != NULL)
68134 + return match->kernel;
68135 + else
68136 + return NULL;
68137 +}
68138 +
68139 +static void
68140 +insert_subj_map_entry(struct subject_map *subjmap)
68141 +{
68142 + unsigned int index = gr_shash(subjmap->user, polstate->subj_map_set.s_size);
68143 + struct subject_map **curr;
68144 +
68145 + subjmap->prev = NULL;
68146 +
68147 + curr = &polstate->subj_map_set.s_hash[index];
68148 + if (*curr != NULL)
68149 + (*curr)->prev = subjmap;
68150 +
68151 + subjmap->next = *curr;
68152 + *curr = subjmap;
68153 +
68154 + return;
68155 +}
68156 +
68157 +static void
68158 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
68159 +{
68160 + unsigned int index =
68161 + gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), polstate->acl_role_set.r_size);
68162 + struct acl_role_label **curr;
68163 + struct acl_role_label *tmp, *tmp2;
68164 +
68165 + curr = &polstate->acl_role_set.r_hash[index];
68166 +
68167 + /* simple case, slot is empty, just set it to our role */
68168 + if (*curr == NULL) {
68169 + *curr = role;
68170 + } else {
68171 + /* example:
68172 + 1 -> 2 -> 3 (adding 2 -> 3 to here)
68173 + 2 -> 3
68174 + */
68175 + /* first check to see if we can already be reached via this slot */
68176 + tmp = *curr;
68177 + while (tmp && tmp != role)
68178 + tmp = tmp->next;
68179 + if (tmp == role) {
68180 + /* we don't need to add ourselves to this slot's chain */
68181 + return;
68182 + }
68183 + /* we need to add ourselves to this chain, two cases */
68184 + if (role->next == NULL) {
68185 + /* simple case, append the current chain to our role */
68186 + role->next = *curr;
68187 + *curr = role;
68188 + } else {
68189 + /* 1 -> 2 -> 3 -> 4
68190 + 2 -> 3 -> 4
68191 + 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
68192 + */
68193 + /* trickier case: walk our role's chain until we find
68194 + the role for the start of the current slot's chain */
68195 + tmp = role;
68196 + tmp2 = *curr;
68197 + while (tmp->next && tmp->next != tmp2)
68198 + tmp = tmp->next;
68199 + if (tmp->next == tmp2) {
68200 + /* from example above, we found 3, so just
68201 + replace this slot's chain with ours */
68202 + *curr = role;
68203 + } else {
68204 + /* we didn't find a subset of our role's chain
68205 + in the current slot's chain, so append their
68206 + chain to ours, and set us as the first role in
68207 + the slot's chain
68208 +
68209 + we could fold this case with the case above,
68210 + but making it explicit for clarity
68211 + */
68212 + tmp->next = tmp2;
68213 + *curr = role;
68214 + }
68215 + }
68216 + }
68217 +
68218 + return;
68219 +}
68220 +
68221 +static void
68222 +insert_acl_role_label(struct acl_role_label *role)
68223 +{
68224 + int i;
68225 +
68226 + if (polstate->role_list == NULL) {
68227 + polstate->role_list = role;
68228 + role->prev = NULL;
68229 + } else {
68230 + role->prev = polstate->role_list;
68231 + polstate->role_list = role;
68232 + }
68233 +
68234 + /* used for hash chains */
68235 + role->next = NULL;
68236 +
68237 + if (role->roletype & GR_ROLE_DOMAIN) {
68238 + for (i = 0; i < role->domain_child_num; i++)
68239 + __insert_acl_role_label(role, role->domain_children[i]);
68240 + } else
68241 + __insert_acl_role_label(role, role->uidgid);
68242 +}
68243 +
68244 +static int
68245 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
68246 +{
68247 + struct name_entry **curr, *nentry;
68248 + struct inodev_entry *ientry;
68249 + unsigned int len = strlen(name);
68250 + unsigned int key = full_name_hash(name, len);
68251 + unsigned int index = key % polstate->name_set.n_size;
68252 +
68253 + curr = &polstate->name_set.n_hash[index];
68254 +
68255 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
68256 + curr = &((*curr)->next);
68257 +
68258 + if (*curr != NULL)
68259 + return 1;
68260 +
68261 + nentry = acl_alloc(sizeof (struct name_entry));
68262 + if (nentry == NULL)
68263 + return 0;
68264 + ientry = acl_alloc(sizeof (struct inodev_entry));
68265 + if (ientry == NULL)
68266 + return 0;
68267 + ientry->nentry = nentry;
68268 +
68269 + nentry->key = key;
68270 + nentry->name = name;
68271 + nentry->inode = inode;
68272 + nentry->device = device;
68273 + nentry->len = len;
68274 + nentry->deleted = deleted;
68275 +
68276 + nentry->prev = NULL;
68277 + curr = &polstate->name_set.n_hash[index];
68278 + if (*curr != NULL)
68279 + (*curr)->prev = nentry;
68280 + nentry->next = *curr;
68281 + *curr = nentry;
68282 +
68283 + /* insert us into the table searchable by inode/dev */
68284 + __insert_inodev_entry(polstate, ientry);
68285 +
68286 + return 1;
68287 +}
68288 +
68289 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
68290 +
68291 +static void *
68292 +create_table(__u32 * len, int elementsize)
68293 +{
68294 + unsigned int table_sizes[] = {
68295 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
68296 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
68297 + 4194301, 8388593, 16777213, 33554393, 67108859
68298 + };
68299 + void *newtable = NULL;
68300 + unsigned int pwr = 0;
68301 +
68302 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
68303 + table_sizes[pwr] <= *len)
68304 + pwr++;
68305 +
68306 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
68307 + return newtable;
68308 +
68309 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
68310 + newtable =
68311 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
68312 + else
68313 + newtable = vmalloc(table_sizes[pwr] * elementsize);
68314 +
68315 + *len = table_sizes[pwr];
68316 +
68317 + return newtable;
68318 +}
68319 +
68320 +static int
68321 +init_variables(const struct gr_arg *arg, bool reload)
68322 +{
68323 + struct task_struct *reaper = init_pid_ns.child_reaper;
68324 + unsigned int stacksize;
68325 +
68326 + polstate->subj_map_set.s_size = arg->role_db.num_subjects;
68327 + polstate->acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
68328 + polstate->name_set.n_size = arg->role_db.num_objects;
68329 + polstate->inodev_set.i_size = arg->role_db.num_objects;
68330 +
68331 + if (!polstate->subj_map_set.s_size || !polstate->acl_role_set.r_size ||
68332 + !polstate->name_set.n_size || !polstate->inodev_set.i_size)
68333 + return 1;
68334 +
68335 + if (!reload) {
68336 + if (!gr_init_uidset())
68337 + return 1;
68338 + }
68339 +
68340 + /* set up the stack that holds allocation info */
68341 +
68342 + stacksize = arg->role_db.num_pointers + 5;
68343 +
68344 + if (!acl_alloc_stack_init(stacksize))
68345 + return 1;
68346 +
68347 + if (!reload) {
68348 + /* grab reference for the real root dentry and vfsmount */
68349 + get_fs_root(reaper->fs, &gr_real_root);
68350 +
68351 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
68352 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(gr_real_root.dentry), gr_real_root.dentry->d_inode->i_ino);
68353 +#endif
68354 +
68355 + fakefs_obj_rw = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
68356 + if (fakefs_obj_rw == NULL)
68357 + return 1;
68358 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
68359 +
68360 + fakefs_obj_rwx = kzalloc(sizeof(struct acl_object_label), GFP_KERNEL);
68361 + if (fakefs_obj_rwx == NULL)
68362 + return 1;
68363 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
68364 + }
68365 +
68366 + polstate->subj_map_set.s_hash =
68367 + (struct subject_map **) create_table(&polstate->subj_map_set.s_size, sizeof(void *));
68368 + polstate->acl_role_set.r_hash =
68369 + (struct acl_role_label **) create_table(&polstate->acl_role_set.r_size, sizeof(void *));
68370 + polstate->name_set.n_hash = (struct name_entry **) create_table(&polstate->name_set.n_size, sizeof(void *));
68371 + polstate->inodev_set.i_hash =
68372 + (struct inodev_entry **) create_table(&polstate->inodev_set.i_size, sizeof(void *));
68373 +
68374 + if (!polstate->subj_map_set.s_hash || !polstate->acl_role_set.r_hash ||
68375 + !polstate->name_set.n_hash || !polstate->inodev_set.i_hash)
68376 + return 1;
68377 +
68378 + memset(polstate->subj_map_set.s_hash, 0,
68379 + sizeof(struct subject_map *) * polstate->subj_map_set.s_size);
68380 + memset(polstate->acl_role_set.r_hash, 0,
68381 + sizeof (struct acl_role_label *) * polstate->acl_role_set.r_size);
68382 + memset(polstate->name_set.n_hash, 0,
68383 + sizeof (struct name_entry *) * polstate->name_set.n_size);
68384 + memset(polstate->inodev_set.i_hash, 0,
68385 + sizeof (struct inodev_entry *) * polstate->inodev_set.i_size);
68386 +
68387 + return 0;
68388 +}
68389 +
68390 +/* free information not needed after startup
68391 + currently contains user->kernel pointer mappings for subjects
68392 +*/
68393 +
68394 +static void
68395 +free_init_variables(void)
68396 +{
68397 + __u32 i;
68398 +
68399 + if (polstate->subj_map_set.s_hash) {
68400 + for (i = 0; i < polstate->subj_map_set.s_size; i++) {
68401 + if (polstate->subj_map_set.s_hash[i]) {
68402 + kfree(polstate->subj_map_set.s_hash[i]);
68403 + polstate->subj_map_set.s_hash[i] = NULL;
68404 + }
68405 + }
68406 +
68407 + if ((polstate->subj_map_set.s_size * sizeof (struct subject_map *)) <=
68408 + PAGE_SIZE)
68409 + kfree(polstate->subj_map_set.s_hash);
68410 + else
68411 + vfree(polstate->subj_map_set.s_hash);
68412 + }
68413 +
68414 + return;
68415 +}
68416 +
68417 +static void
68418 +free_variables(bool reload)
68419 +{
68420 + struct acl_subject_label *s;
68421 + struct acl_role_label *r;
68422 + struct task_struct *task, *task2;
68423 + unsigned int x;
68424 +
68425 + if (!reload) {
68426 + gr_clear_learn_entries();
68427 +
68428 + read_lock(&tasklist_lock);
68429 + do_each_thread(task2, task) {
68430 + task->acl_sp_role = 0;
68431 + task->acl_role_id = 0;
68432 + task->inherited = 0;
68433 + task->acl = NULL;
68434 + task->role = NULL;
68435 + } while_each_thread(task2, task);
68436 + read_unlock(&tasklist_lock);
68437 +
68438 + kfree(fakefs_obj_rw);
68439 + fakefs_obj_rw = NULL;
68440 + kfree(fakefs_obj_rwx);
68441 + fakefs_obj_rwx = NULL;
68442 +
68443 + /* release the reference to the real root dentry and vfsmount */
68444 + path_put(&gr_real_root);
68445 + memset(&gr_real_root, 0, sizeof(gr_real_root));
68446 + }
68447 +
68448 + /* free all object hash tables */
68449 +
68450 + FOR_EACH_ROLE_START(r)
68451 + if (r->subj_hash == NULL)
68452 + goto next_role;
68453 + FOR_EACH_SUBJECT_START(r, s, x)
68454 + if (s->obj_hash == NULL)
68455 + break;
68456 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
68457 + kfree(s->obj_hash);
68458 + else
68459 + vfree(s->obj_hash);
68460 + FOR_EACH_SUBJECT_END(s, x)
68461 + FOR_EACH_NESTED_SUBJECT_START(r, s)
68462 + if (s->obj_hash == NULL)
68463 + break;
68464 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
68465 + kfree(s->obj_hash);
68466 + else
68467 + vfree(s->obj_hash);
68468 + FOR_EACH_NESTED_SUBJECT_END(s)
68469 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
68470 + kfree(r->subj_hash);
68471 + else
68472 + vfree(r->subj_hash);
68473 + r->subj_hash = NULL;
68474 +next_role:
68475 + FOR_EACH_ROLE_END(r)
68476 +
68477 + acl_free_all();
68478 +
68479 + if (polstate->acl_role_set.r_hash) {
68480 + if ((polstate->acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
68481 + PAGE_SIZE)
68482 + kfree(polstate->acl_role_set.r_hash);
68483 + else
68484 + vfree(polstate->acl_role_set.r_hash);
68485 + }
68486 + if (polstate->name_set.n_hash) {
68487 + if ((polstate->name_set.n_size * sizeof (struct name_entry *)) <=
68488 + PAGE_SIZE)
68489 + kfree(polstate->name_set.n_hash);
68490 + else
68491 + vfree(polstate->name_set.n_hash);
68492 + }
68493 +
68494 + if (polstate->inodev_set.i_hash) {
68495 + if ((polstate->inodev_set.i_size * sizeof (struct inodev_entry *)) <=
68496 + PAGE_SIZE)
68497 + kfree(polstate->inodev_set.i_hash);
68498 + else
68499 + vfree(polstate->inodev_set.i_hash);
68500 + }
68501 +
68502 + if (!reload)
68503 + gr_free_uidset();
68504 +
68505 + memset(&polstate->name_set, 0, sizeof (struct name_db));
68506 + memset(&polstate->inodev_set, 0, sizeof (struct inodev_db));
68507 + memset(&polstate->acl_role_set, 0, sizeof (struct acl_role_db));
68508 + memset(&polstate->subj_map_set, 0, sizeof (struct acl_subj_map_db));
68509 +
68510 + polstate->default_role = NULL;
68511 + polstate->kernel_role = NULL;
68512 + polstate->role_list = NULL;
68513 +
68514 + return;
68515 +}
68516 +
68517 +static struct acl_subject_label *
68518 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
68519 +
68520 +static int alloc_and_copy_string(char **name, unsigned int maxlen)
68521 +{
68522 + unsigned int len = strnlen_user(*name, maxlen);
68523 + char *tmp;
68524 +
68525 + if (!len || len >= maxlen)
68526 + return -EINVAL;
68527 +
68528 + if ((tmp = (char *) acl_alloc(len)) == NULL)
68529 + return -ENOMEM;
68530 +
68531 + if (copy_from_user(tmp, *name, len))
68532 + return -EFAULT;
68533 +
68534 + tmp[len-1] = '\0';
68535 + *name = tmp;
68536 +
68537 + return 0;
68538 +}
68539 +
68540 +static int
68541 +copy_user_glob(struct acl_object_label *obj)
68542 +{
68543 + struct acl_object_label *g_tmp, **guser;
68544 + int error;
68545 +
68546 + if (obj->globbed == NULL)
68547 + return 0;
68548 +
68549 + guser = &obj->globbed;
68550 + while (*guser) {
68551 + g_tmp = (struct acl_object_label *)
68552 + acl_alloc(sizeof (struct acl_object_label));
68553 + if (g_tmp == NULL)
68554 + return -ENOMEM;
68555 +
68556 + if (copy_acl_object_label(g_tmp, *guser))
68557 + return -EFAULT;
68558 +
68559 + error = alloc_and_copy_string(&g_tmp->filename, PATH_MAX);
68560 + if (error)
68561 + return error;
68562 +
68563 + *guser = g_tmp;
68564 + guser = &(g_tmp->next);
68565 + }
68566 +
68567 + return 0;
68568 +}
68569 +
68570 +static int
68571 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
68572 + struct acl_role_label *role)
68573 +{
68574 + struct acl_object_label *o_tmp;
68575 + int ret;
68576 +
68577 + while (userp) {
68578 + if ((o_tmp = (struct acl_object_label *)
68579 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
68580 + return -ENOMEM;
68581 +
68582 + if (copy_acl_object_label(o_tmp, userp))
68583 + return -EFAULT;
68584 +
68585 + userp = o_tmp->prev;
68586 +
68587 + ret = alloc_and_copy_string(&o_tmp->filename, PATH_MAX);
68588 + if (ret)
68589 + return ret;
68590 +
68591 + insert_acl_obj_label(o_tmp, subj);
68592 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
68593 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
68594 + return -ENOMEM;
68595 +
68596 + ret = copy_user_glob(o_tmp);
68597 + if (ret)
68598 + return ret;
68599 +
68600 + if (o_tmp->nested) {
68601 + int already_copied;
68602 +
68603 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
68604 + if (IS_ERR(o_tmp->nested))
68605 + return PTR_ERR(o_tmp->nested);
68606 +
68607 + /* insert into nested subject list if we haven't copied this one yet
68608 + to prevent duplicate entries */
68609 + if (!already_copied) {
68610 + o_tmp->nested->next = role->hash->first;
68611 + role->hash->first = o_tmp->nested;
68612 + }
68613 + }
68614 + }
68615 +
68616 + return 0;
68617 +}
68618 +
68619 +static __u32
68620 +count_user_subjs(struct acl_subject_label *userp)
68621 +{
68622 + struct acl_subject_label s_tmp;
68623 + __u32 num = 0;
68624 +
68625 + while (userp) {
68626 + if (copy_acl_subject_label(&s_tmp, userp))
68627 + break;
68628 +
68629 + userp = s_tmp.prev;
68630 + }
68631 +
68632 + return num;
68633 +}
68634 +
68635 +static int
68636 +copy_user_allowedips(struct acl_role_label *rolep)
68637 +{
68638 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
68639 +
68640 + ruserip = rolep->allowed_ips;
68641 +
68642 + while (ruserip) {
68643 + rlast = rtmp;
68644 +
68645 + if ((rtmp = (struct role_allowed_ip *)
68646 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
68647 + return -ENOMEM;
68648 +
68649 + if (copy_role_allowed_ip(rtmp, ruserip))
68650 + return -EFAULT;
68651 +
68652 + ruserip = rtmp->prev;
68653 +
68654 + if (!rlast) {
68655 + rtmp->prev = NULL;
68656 + rolep->allowed_ips = rtmp;
68657 + } else {
68658 + rlast->next = rtmp;
68659 + rtmp->prev = rlast;
68660 + }
68661 +
68662 + if (!ruserip)
68663 + rtmp->next = NULL;
68664 + }
68665 +
68666 + return 0;
68667 +}
68668 +
68669 +static int
68670 +copy_user_transitions(struct acl_role_label *rolep)
68671 +{
68672 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
68673 + int error;
68674 +
68675 + rusertp = rolep->transitions;
68676 +
68677 + while (rusertp) {
68678 + rlast = rtmp;
68679 +
68680 + if ((rtmp = (struct role_transition *)
68681 + acl_alloc(sizeof (struct role_transition))) == NULL)
68682 + return -ENOMEM;
68683 +
68684 + if (copy_role_transition(rtmp, rusertp))
68685 + return -EFAULT;
68686 +
68687 + rusertp = rtmp->prev;
68688 +
68689 + error = alloc_and_copy_string(&rtmp->rolename, GR_SPROLE_LEN);
68690 + if (error)
68691 + return error;
68692 +
68693 + if (!rlast) {
68694 + rtmp->prev = NULL;
68695 + rolep->transitions = rtmp;
68696 + } else {
68697 + rlast->next = rtmp;
68698 + rtmp->prev = rlast;
68699 + }
68700 +
68701 + if (!rusertp)
68702 + rtmp->next = NULL;
68703 + }
68704 +
68705 + return 0;
68706 +}
68707 +
68708 +static __u32 count_user_objs(const struct acl_object_label __user *userp)
68709 +{
68710 + struct acl_object_label o_tmp;
68711 + __u32 num = 0;
68712 +
68713 + while (userp) {
68714 + if (copy_acl_object_label(&o_tmp, userp))
68715 + break;
68716 +
68717 + userp = o_tmp.prev;
68718 + num++;
68719 + }
68720 +
68721 + return num;
68722 +}
68723 +
68724 +static struct acl_subject_label *
68725 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
68726 +{
68727 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
68728 + __u32 num_objs;
68729 + struct acl_ip_label **i_tmp, *i_utmp2;
68730 + struct gr_hash_struct ghash;
68731 + struct subject_map *subjmap;
68732 + unsigned int i_num;
68733 + int err;
68734 +
68735 + if (already_copied != NULL)
68736 + *already_copied = 0;
68737 +
68738 + s_tmp = lookup_subject_map(userp);
68739 +
68740 + /* we've already copied this subject into the kernel, just return
68741 + the reference to it, and don't copy it over again
68742 + */
68743 + if (s_tmp) {
68744 + if (already_copied != NULL)
68745 + *already_copied = 1;
68746 + return(s_tmp);
68747 + }
68748 +
68749 + if ((s_tmp = (struct acl_subject_label *)
68750 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
68751 + return ERR_PTR(-ENOMEM);
68752 +
68753 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
68754 + if (subjmap == NULL)
68755 + return ERR_PTR(-ENOMEM);
68756 +
68757 + subjmap->user = userp;
68758 + subjmap->kernel = s_tmp;
68759 + insert_subj_map_entry(subjmap);
68760 +
68761 + if (copy_acl_subject_label(s_tmp, userp))
68762 + return ERR_PTR(-EFAULT);
68763 +
68764 + err = alloc_and_copy_string(&s_tmp->filename, PATH_MAX);
68765 + if (err)
68766 + return ERR_PTR(err);
68767 +
68768 + if (!strcmp(s_tmp->filename, "/"))
68769 + role->root_label = s_tmp;
68770 +
68771 + if (copy_gr_hash_struct(&ghash, s_tmp->hash))
68772 + return ERR_PTR(-EFAULT);
68773 +
68774 + /* copy user and group transition tables */
68775 +
68776 + if (s_tmp->user_trans_num) {
68777 + uid_t *uidlist;
68778 +
68779 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
68780 + if (uidlist == NULL)
68781 + return ERR_PTR(-ENOMEM);
68782 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
68783 + return ERR_PTR(-EFAULT);
68784 +
68785 + s_tmp->user_transitions = uidlist;
68786 + }
68787 +
68788 + if (s_tmp->group_trans_num) {
68789 + gid_t *gidlist;
68790 +
68791 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
68792 + if (gidlist == NULL)
68793 + return ERR_PTR(-ENOMEM);
68794 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
68795 + return ERR_PTR(-EFAULT);
68796 +
68797 + s_tmp->group_transitions = gidlist;
68798 + }
68799 +
68800 + /* set up object hash table */
68801 + num_objs = count_user_objs(ghash.first);
68802 +
68803 + s_tmp->obj_hash_size = num_objs;
68804 + s_tmp->obj_hash =
68805 + (struct acl_object_label **)
68806 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
68807 +
68808 + if (!s_tmp->obj_hash)
68809 + return ERR_PTR(-ENOMEM);
68810 +
68811 + memset(s_tmp->obj_hash, 0,
68812 + s_tmp->obj_hash_size *
68813 + sizeof (struct acl_object_label *));
68814 +
68815 + /* add in objects */
68816 + err = copy_user_objs(ghash.first, s_tmp, role);
68817 +
68818 + if (err)
68819 + return ERR_PTR(err);
68820 +
68821 + /* set pointer for parent subject */
68822 + if (s_tmp->parent_subject) {
68823 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
68824 +
68825 + if (IS_ERR(s_tmp2))
68826 + return s_tmp2;
68827 +
68828 + s_tmp->parent_subject = s_tmp2;
68829 + }
68830 +
68831 + /* add in ip acls */
68832 +
68833 + if (!s_tmp->ip_num) {
68834 + s_tmp->ips = NULL;
68835 + goto insert;
68836 + }
68837 +
68838 + i_tmp =
68839 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
68840 + sizeof (struct acl_ip_label *));
68841 +
68842 + if (!i_tmp)
68843 + return ERR_PTR(-ENOMEM);
68844 +
68845 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
68846 + *(i_tmp + i_num) =
68847 + (struct acl_ip_label *)
68848 + acl_alloc(sizeof (struct acl_ip_label));
68849 + if (!*(i_tmp + i_num))
68850 + return ERR_PTR(-ENOMEM);
68851 +
68852 + if (copy_pointer_from_array(&i_utmp2, i_num, s_tmp->ips))
68853 + return ERR_PTR(-EFAULT);
68854 +
68855 + if (copy_acl_ip_label(*(i_tmp + i_num), i_utmp2))
68856 + return ERR_PTR(-EFAULT);
68857 +
68858 + if ((*(i_tmp + i_num))->iface == NULL)
68859 + continue;
68860 +
68861 + err = alloc_and_copy_string(&(*(i_tmp + i_num))->iface, IFNAMSIZ);
68862 + if (err)
68863 + return ERR_PTR(err);
68864 + }
68865 +
68866 + s_tmp->ips = i_tmp;
68867 +
68868 +insert:
68869 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
68870 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
68871 + return ERR_PTR(-ENOMEM);
68872 +
68873 + return s_tmp;
68874 +}
68875 +
68876 +static int
68877 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
68878 +{
68879 + struct acl_subject_label s_pre;
68880 + struct acl_subject_label * ret;
68881 + int err;
68882 +
68883 + while (userp) {
68884 + if (copy_acl_subject_label(&s_pre, userp))
68885 + return -EFAULT;
68886 +
68887 + ret = do_copy_user_subj(userp, role, NULL);
68888 +
68889 + err = PTR_ERR(ret);
68890 + if (IS_ERR(ret))
68891 + return err;
68892 +
68893 + insert_acl_subj_label(ret, role);
68894 +
68895 + userp = s_pre.prev;
68896 + }
68897 +
68898 + return 0;
68899 +}
68900 +
68901 +static int
68902 +copy_user_acl(struct gr_arg *arg)
68903 +{
68904 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
68905 + struct acl_subject_label *subj_list;
68906 + struct sprole_pw *sptmp;
68907 + struct gr_hash_struct *ghash;
68908 + uid_t *domainlist;
68909 + unsigned int r_num;
68910 + int err = 0;
68911 + __u16 i;
68912 + __u32 num_subjs;
68913 +
68914 + /* we need a default and kernel role */
68915 + if (arg->role_db.num_roles < 2)
68916 + return -EINVAL;
68917 +
68918 + /* copy special role authentication info from userspace */
68919 +
68920 + polstate->num_sprole_pws = arg->num_sprole_pws;
68921 + polstate->acl_special_roles = (struct sprole_pw **) acl_alloc_num(polstate->num_sprole_pws, sizeof(struct sprole_pw *));
68922 +
68923 + if (!polstate->acl_special_roles && polstate->num_sprole_pws)
68924 + return -ENOMEM;
68925 +
68926 + for (i = 0; i < polstate->num_sprole_pws; i++) {
68927 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
68928 + if (!sptmp)
68929 + return -ENOMEM;
68930 + if (copy_sprole_pw(sptmp, i, arg->sprole_pws))
68931 + return -EFAULT;
68932 +
68933 + err = alloc_and_copy_string((char **)&sptmp->rolename, GR_SPROLE_LEN);
68934 + if (err)
68935 + return err;
68936 +
68937 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
68938 + printk(KERN_ALERT "Copying special role %s\n", sptmp->rolename);
68939 +#endif
68940 +
68941 + polstate->acl_special_roles[i] = sptmp;
68942 + }
68943 +
68944 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
68945 +
68946 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
68947 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
68948 +
68949 + if (!r_tmp)
68950 + return -ENOMEM;
68951 +
68952 + if (copy_pointer_from_array(&r_utmp2, r_num, r_utmp))
68953 + return -EFAULT;
68954 +
68955 + if (copy_acl_role_label(r_tmp, r_utmp2))
68956 + return -EFAULT;
68957 +
68958 + err = alloc_and_copy_string(&r_tmp->rolename, GR_SPROLE_LEN);
68959 + if (err)
68960 + return err;
68961 +
68962 + if (!strcmp(r_tmp->rolename, "default")
68963 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
68964 + polstate->default_role = r_tmp;
68965 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
68966 + polstate->kernel_role = r_tmp;
68967 + }
68968 +
68969 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
68970 + return -ENOMEM;
68971 +
68972 + if (copy_gr_hash_struct(ghash, r_tmp->hash))
68973 + return -EFAULT;
68974 +
68975 + r_tmp->hash = ghash;
68976 +
68977 + num_subjs = count_user_subjs(r_tmp->hash->first);
68978 +
68979 + r_tmp->subj_hash_size = num_subjs;
68980 + r_tmp->subj_hash =
68981 + (struct acl_subject_label **)
68982 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
68983 +
68984 + if (!r_tmp->subj_hash)
68985 + return -ENOMEM;
68986 +
68987 + err = copy_user_allowedips(r_tmp);
68988 + if (err)
68989 + return err;
68990 +
68991 + /* copy domain info */
68992 + if (r_tmp->domain_children != NULL) {
68993 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
68994 + if (domainlist == NULL)
68995 + return -ENOMEM;
68996 +
68997 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
68998 + return -EFAULT;
68999 +
69000 + r_tmp->domain_children = domainlist;
69001 + }
69002 +
69003 + err = copy_user_transitions(r_tmp);
69004 + if (err)
69005 + return err;
69006 +
69007 + memset(r_tmp->subj_hash, 0,
69008 + r_tmp->subj_hash_size *
69009 + sizeof (struct acl_subject_label *));
69010 +
69011 + /* acquire the list of subjects, then NULL out
69012 + the list prior to parsing the subjects for this role,
69013 + as during this parsing the list is replaced with a list
69014 + of *nested* subjects for the role
69015 + */
69016 + subj_list = r_tmp->hash->first;
69017 +
69018 + /* set nested subject list to null */
69019 + r_tmp->hash->first = NULL;
69020 +
69021 + err = copy_user_subjs(subj_list, r_tmp);
69022 +
69023 + if (err)
69024 + return err;
69025 +
69026 + insert_acl_role_label(r_tmp);
69027 + }
69028 +
69029 + if (polstate->default_role == NULL || polstate->kernel_role == NULL)
69030 + return -EINVAL;
69031 +
69032 + return err;
69033 +}
69034 +
69035 +static int gracl_reload_apply_policies(void *reload)
69036 +{
69037 + struct gr_reload_state *reload_state = (struct gr_reload_state *)reload;
69038 + struct task_struct *task, *task2;
69039 + struct acl_role_label *role, *rtmp;
69040 + struct acl_subject_label *subj;
69041 + const struct cred *cred;
69042 + int role_applied;
69043 + int ret = 0;
69044 +
69045 + memcpy(&reload_state->oldpolicy, reload_state->oldpolicy_ptr, sizeof(struct gr_policy_state));
69046 + memcpy(&reload_state->oldalloc, reload_state->oldalloc_ptr, sizeof(struct gr_alloc_state));
69047 +
69048 + /* first make sure we'll be able to apply the new policy cleanly */
69049 + do_each_thread(task2, task) {
69050 + if (task->exec_file == NULL)
69051 + continue;
69052 + role_applied = 0;
69053 + if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
69054 + /* preserve special roles */
69055 + FOR_EACH_ROLE_START(role)
69056 + if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
69057 + rtmp = task->role;
69058 + task->role = role;
69059 + role_applied = 1;
69060 + break;
69061 + }
69062 + FOR_EACH_ROLE_END(role)
69063 + }
69064 + if (!role_applied) {
69065 + cred = __task_cred(task);
69066 + rtmp = task->role;
69067 + task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
69068 + }
69069 + /* this handles non-nested inherited subjects, nested subjects will still
69070 + be dropped currently */
69071 + subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
69072 + task->tmpacl = __gr_get_subject_for_task(polstate, task, NULL);
69073 + /* change the role back so that we've made no modifications to the policy */
69074 + task->role = rtmp;
69075 +
69076 + if (subj == NULL || task->tmpacl == NULL) {
69077 + ret = -EINVAL;
69078 + goto out;
69079 + }
69080 + } while_each_thread(task2, task);
69081 +
69082 + /* now actually apply the policy */
69083 +
69084 + do_each_thread(task2, task) {
69085 + if (task->exec_file) {
69086 + role_applied = 0;
69087 + if (!reload_state->oldmode && task->role->roletype & GR_ROLE_SPECIAL) {
69088 + /* preserve special roles */
69089 + FOR_EACH_ROLE_START(role)
69090 + if ((role->roletype & GR_ROLE_SPECIAL) && !strcmp(task->role->rolename, role->rolename)) {
69091 + task->role = role;
69092 + role_applied = 1;
69093 + break;
69094 + }
69095 + FOR_EACH_ROLE_END(role)
69096 + }
69097 + if (!role_applied) {
69098 + cred = __task_cred(task);
69099 + task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
69100 + }
69101 + /* this handles non-nested inherited subjects, nested subjects will still
69102 + be dropped currently */
69103 + if (!reload_state->oldmode && task->inherited)
69104 + subj = __gr_get_subject_for_task(polstate, task, task->acl->filename);
69105 + else {
69106 + /* looked up and tagged to the task previously */
69107 + subj = task->tmpacl;
69108 + }
69109 + /* subj will be non-null */
69110 + __gr_apply_subject_to_task(polstate, task, subj);
69111 + if (reload_state->oldmode) {
69112 + task->acl_role_id = 0;
69113 + task->acl_sp_role = 0;
69114 + task->inherited = 0;
69115 + }
69116 + } else {
69117 + // it's a kernel process
69118 + task->role = polstate->kernel_role;
69119 + task->acl = polstate->kernel_role->root_label;
69120 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
69121 + task->acl->mode &= ~GR_PROCFIND;
69122 +#endif
69123 + }
69124 + } while_each_thread(task2, task);
69125 +
69126 + memcpy(reload_state->oldpolicy_ptr, &reload_state->newpolicy, sizeof(struct gr_policy_state));
69127 + memcpy(reload_state->oldalloc_ptr, &reload_state->newalloc, sizeof(struct gr_alloc_state));
69128 +
69129 +out:
69130 +
69131 + return ret;
69132 +}
69133 +
69134 +static int gracl_reload(struct gr_arg *args, unsigned char oldmode)
69135 +{
69136 + struct gr_reload_state new_reload_state = { };
69137 + int err;
69138 +
69139 + new_reload_state.oldpolicy_ptr = polstate;
69140 + new_reload_state.oldalloc_ptr = current_alloc_state;
69141 + new_reload_state.oldmode = oldmode;
69142 +
69143 + current_alloc_state = &new_reload_state.newalloc;
69144 + polstate = &new_reload_state.newpolicy;
69145 +
69146 + /* everything relevant is now saved off, copy in the new policy */
69147 + if (init_variables(args, true)) {
69148 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
69149 + err = -ENOMEM;
69150 + goto error;
69151 + }
69152 +
69153 + err = copy_user_acl(args);
69154 + free_init_variables();
69155 + if (err)
69156 + goto error;
69157 + /* the new policy is copied in, with the old policy available via saved_state
69158 + first go through applying roles, making sure to preserve special roles
69159 + then apply new subjects, making sure to preserve inherited and nested subjects,
69160 + though currently only inherited subjects will be preserved
69161 + */
69162 + err = stop_machine(gracl_reload_apply_policies, &new_reload_state, NULL);
69163 + if (err)
69164 + goto error;
69165 +
69166 + /* we've now applied the new policy, so restore the old policy state to free it */
69167 + polstate = &new_reload_state.oldpolicy;
69168 + current_alloc_state = &new_reload_state.oldalloc;
69169 + free_variables(true);
69170 +
69171 + /* oldpolicy/oldalloc_ptr point to the new policy/alloc states as they were copied
69172 + to running_polstate/current_alloc_state inside stop_machine
69173 + */
69174 + err = 0;
69175 + goto out;
69176 +error:
69177 + /* on error of loading the new policy, we'll just keep the previous
69178 + policy set around
69179 + */
69180 + free_variables(true);
69181 +
69182 + /* doesn't affect runtime, but maintains consistent state */
69183 +out:
69184 + polstate = new_reload_state.oldpolicy_ptr;
69185 + current_alloc_state = new_reload_state.oldalloc_ptr;
69186 +
69187 + return err;
69188 +}
69189 +
69190 +static int
69191 +gracl_init(struct gr_arg *args)
69192 +{
69193 + int error = 0;
69194 +
69195 + memcpy(&gr_system_salt, args->salt, sizeof(gr_system_salt));
69196 + memcpy(&gr_system_sum, args->sum, sizeof(gr_system_sum));
69197 +
69198 + if (init_variables(args, false)) {
69199 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
69200 + error = -ENOMEM;
69201 + goto out;
69202 + }
69203 +
69204 + error = copy_user_acl(args);
69205 + free_init_variables();
69206 + if (error)
69207 + goto out;
69208 +
69209 + error = gr_set_acls(0);
69210 + if (error)
69211 + goto out;
69212 +
69213 + gr_enable_rbac_system();
69214 +
69215 + return 0;
69216 +
69217 +out:
69218 + free_variables(false);
69219 + return error;
69220 +}
69221 +
69222 +static int
69223 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
69224 + unsigned char **sum)
69225 +{
69226 + struct acl_role_label *r;
69227 + struct role_allowed_ip *ipp;
69228 + struct role_transition *trans;
69229 + unsigned int i;
69230 + int found = 0;
69231 + u32 curr_ip = current->signal->curr_ip;
69232 +
69233 + current->signal->saved_ip = curr_ip;
69234 +
69235 + /* check transition table */
69236 +
69237 + for (trans = current->role->transitions; trans; trans = trans->next) {
69238 + if (!strcmp(rolename, trans->rolename)) {
69239 + found = 1;
69240 + break;
69241 + }
69242 + }
69243 +
69244 + if (!found)
69245 + return 0;
69246 +
69247 + /* handle special roles that do not require authentication
69248 + and check ip */
69249 +
69250 + FOR_EACH_ROLE_START(r)
69251 + if (!strcmp(rolename, r->rolename) &&
69252 + (r->roletype & GR_ROLE_SPECIAL)) {
69253 + found = 0;
69254 + if (r->allowed_ips != NULL) {
69255 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
69256 + if ((ntohl(curr_ip) & ipp->netmask) ==
69257 + (ntohl(ipp->addr) & ipp->netmask))
69258 + found = 1;
69259 + }
69260 + } else
69261 + found = 2;
69262 + if (!found)
69263 + return 0;
69264 +
69265 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
69266 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
69267 + *salt = NULL;
69268 + *sum = NULL;
69269 + return 1;
69270 + }
69271 + }
69272 + FOR_EACH_ROLE_END(r)
69273 +
69274 + for (i = 0; i < polstate->num_sprole_pws; i++) {
69275 + if (!strcmp(rolename, polstate->acl_special_roles[i]->rolename)) {
69276 + *salt = polstate->acl_special_roles[i]->salt;
69277 + *sum = polstate->acl_special_roles[i]->sum;
69278 + return 1;
69279 + }
69280 + }
69281 +
69282 + return 0;
69283 +}
69284 +
69285 +int gr_check_secure_terminal(struct task_struct *task)
69286 +{
69287 + struct task_struct *p, *p2, *p3;
69288 + struct files_struct *files;
69289 + struct fdtable *fdt;
69290 + struct file *our_file = NULL, *file;
69291 + int i;
69292 +
69293 + if (task->signal->tty == NULL)
69294 + return 1;
69295 +
69296 + files = get_files_struct(task);
69297 + if (files != NULL) {
69298 + rcu_read_lock();
69299 + fdt = files_fdtable(files);
69300 + for (i=0; i < fdt->max_fds; i++) {
69301 + file = fcheck_files(files, i);
69302 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
69303 + get_file(file);
69304 + our_file = file;
69305 + }
69306 + }
69307 + rcu_read_unlock();
69308 + put_files_struct(files);
69309 + }
69310 +
69311 + if (our_file == NULL)
69312 + return 1;
69313 +
69314 + read_lock(&tasklist_lock);
69315 + do_each_thread(p2, p) {
69316 + files = get_files_struct(p);
69317 + if (files == NULL ||
69318 + (p->signal && p->signal->tty == task->signal->tty)) {
69319 + if (files != NULL)
69320 + put_files_struct(files);
69321 + continue;
69322 + }
69323 + rcu_read_lock();
69324 + fdt = files_fdtable(files);
69325 + for (i=0; i < fdt->max_fds; i++) {
69326 + file = fcheck_files(files, i);
69327 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
69328 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
69329 + p3 = task;
69330 + while (task_pid_nr(p3) > 0) {
69331 + if (p3 == p)
69332 + break;
69333 + p3 = p3->real_parent;
69334 + }
69335 + if (p3 == p)
69336 + break;
69337 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
69338 + gr_handle_alertkill(p);
69339 + rcu_read_unlock();
69340 + put_files_struct(files);
69341 + read_unlock(&tasklist_lock);
69342 + fput(our_file);
69343 + return 0;
69344 + }
69345 + }
69346 + rcu_read_unlock();
69347 + put_files_struct(files);
69348 + } while_each_thread(p2, p);
69349 + read_unlock(&tasklist_lock);
69350 +
69351 + fput(our_file);
69352 + return 1;
69353 +}
69354 +
69355 +ssize_t
69356 +write_grsec_handler(struct file *file, const char __user * buf, size_t count, loff_t *ppos)
69357 +{
69358 + struct gr_arg_wrapper uwrap;
69359 + unsigned char *sprole_salt = NULL;
69360 + unsigned char *sprole_sum = NULL;
69361 + int error = 0;
69362 + int error2 = 0;
69363 + size_t req_count = 0;
69364 + unsigned char oldmode = 0;
69365 +
69366 + mutex_lock(&gr_dev_mutex);
69367 +
69368 + if (gr_acl_is_enabled() && !(current->acl->mode & GR_KERNELAUTH)) {
69369 + error = -EPERM;
69370 + goto out;
69371 + }
69372 +
69373 +#ifdef CONFIG_COMPAT
69374 + pax_open_kernel();
69375 + if (is_compat_task()) {
69376 + copy_gr_arg_wrapper = &copy_gr_arg_wrapper_compat;
69377 + copy_gr_arg = &copy_gr_arg_compat;
69378 + copy_acl_object_label = &copy_acl_object_label_compat;
69379 + copy_acl_subject_label = &copy_acl_subject_label_compat;
69380 + copy_acl_role_label = &copy_acl_role_label_compat;
69381 + copy_acl_ip_label = &copy_acl_ip_label_compat;
69382 + copy_role_allowed_ip = &copy_role_allowed_ip_compat;
69383 + copy_role_transition = &copy_role_transition_compat;
69384 + copy_sprole_pw = &copy_sprole_pw_compat;
69385 + copy_gr_hash_struct = &copy_gr_hash_struct_compat;
69386 + copy_pointer_from_array = &copy_pointer_from_array_compat;
69387 + get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_compat;
69388 + } else {
69389 + copy_gr_arg_wrapper = &copy_gr_arg_wrapper_normal;
69390 + copy_gr_arg = &copy_gr_arg_normal;
69391 + copy_acl_object_label = &copy_acl_object_label_normal;
69392 + copy_acl_subject_label = &copy_acl_subject_label_normal;
69393 + copy_acl_role_label = &copy_acl_role_label_normal;
69394 + copy_acl_ip_label = &copy_acl_ip_label_normal;
69395 + copy_role_allowed_ip = &copy_role_allowed_ip_normal;
69396 + copy_role_transition = &copy_role_transition_normal;
69397 + copy_sprole_pw = &copy_sprole_pw_normal;
69398 + copy_gr_hash_struct = &copy_gr_hash_struct_normal;
69399 + copy_pointer_from_array = &copy_pointer_from_array_normal;
69400 + get_gr_arg_wrapper_size = &get_gr_arg_wrapper_size_normal;
69401 + }
69402 + pax_close_kernel();
69403 +#endif
69404 +
69405 + req_count = get_gr_arg_wrapper_size();
69406 +
69407 + if (count != req_count) {
69408 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)req_count);
69409 + error = -EINVAL;
69410 + goto out;
69411 + }
69412 +
69413 +
69414 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
69415 + gr_auth_expires = 0;
69416 + gr_auth_attempts = 0;
69417 + }
69418 +
69419 + error = copy_gr_arg_wrapper(buf, &uwrap);
69420 + if (error)
69421 + goto out;
69422 +
69423 + error = copy_gr_arg(uwrap.arg, &gr_usermode);
69424 + if (error)
69425 + goto out;
69426 +
69427 + if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_SPROLEPAM &&
69428 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
69429 + time_after(gr_auth_expires, get_seconds())) {
69430 + error = -EBUSY;
69431 + goto out;
69432 + }
69433 +
69434 + /* if non-root trying to do anything other than use a special role,
69435 + do not attempt authentication, do not count towards authentication
69436 + locking
69437 + */
69438 +
69439 + if (gr_usermode.mode != GR_SPROLE && gr_usermode.mode != GR_STATUS &&
69440 + gr_usermode.mode != GR_UNSPROLE && gr_usermode.mode != GR_SPROLEPAM &&
69441 + gr_is_global_nonroot(current_uid())) {
69442 + error = -EPERM;
69443 + goto out;
69444 + }
69445 +
69446 + /* ensure pw and special role name are null terminated */
69447 +
69448 + gr_usermode.pw[GR_PW_LEN - 1] = '\0';
69449 + gr_usermode.sp_role[GR_SPROLE_LEN - 1] = '\0';
69450 +
69451 + /* Okay.
69452 + * We have our enough of the argument structure..(we have yet
69453 + * to copy_from_user the tables themselves) . Copy the tables
69454 + * only if we need them, i.e. for loading operations. */
69455 +
69456 + switch (gr_usermode.mode) {
69457 + case GR_STATUS:
69458 + if (gr_acl_is_enabled()) {
69459 + error = 1;
69460 + if (!gr_check_secure_terminal(current))
69461 + error = 3;
69462 + } else
69463 + error = 2;
69464 + goto out;
69465 + case GR_SHUTDOWN:
69466 + if (gr_acl_is_enabled() && !(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
69467 + stop_machine(gr_rbac_disable, NULL, NULL);
69468 + free_variables(false);
69469 + memset(&gr_usermode, 0, sizeof(gr_usermode));
69470 + memset(&gr_system_salt, 0, sizeof(gr_system_salt));
69471 + memset(&gr_system_sum, 0, sizeof(gr_system_sum));
69472 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
69473 + } else if (gr_acl_is_enabled()) {
69474 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
69475 + error = -EPERM;
69476 + } else {
69477 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
69478 + error = -EAGAIN;
69479 + }
69480 + break;
69481 + case GR_ENABLE:
69482 + if (!gr_acl_is_enabled() && !(error2 = gracl_init(&gr_usermode)))
69483 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
69484 + else {
69485 + if (gr_acl_is_enabled())
69486 + error = -EAGAIN;
69487 + else
69488 + error = error2;
69489 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
69490 + }
69491 + break;
69492 + case GR_OLDRELOAD:
69493 + oldmode = 1;
69494 + case GR_RELOAD:
69495 + if (!gr_acl_is_enabled()) {
69496 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
69497 + error = -EAGAIN;
69498 + } else if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
69499 + error2 = gracl_reload(&gr_usermode, oldmode);
69500 + if (!error2)
69501 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
69502 + else {
69503 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
69504 + error = error2;
69505 + }
69506 + } else {
69507 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
69508 + error = -EPERM;
69509 + }
69510 + break;
69511 + case GR_SEGVMOD:
69512 + if (unlikely(!gr_acl_is_enabled())) {
69513 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
69514 + error = -EAGAIN;
69515 + break;
69516 + }
69517 +
69518 + if (!(chkpw(&gr_usermode, (unsigned char *)&gr_system_salt, (unsigned char *)&gr_system_sum))) {
69519 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
69520 + if (gr_usermode.segv_device && gr_usermode.segv_inode) {
69521 + struct acl_subject_label *segvacl;
69522 + segvacl =
69523 + lookup_acl_subj_label(gr_usermode.segv_inode,
69524 + gr_usermode.segv_device,
69525 + current->role);
69526 + if (segvacl) {
69527 + segvacl->crashes = 0;
69528 + segvacl->expires = 0;
69529 + }
69530 + } else if (gr_find_uid(gr_usermode.segv_uid) >= 0) {
69531 + gr_remove_uid(gr_usermode.segv_uid);
69532 + }
69533 + } else {
69534 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
69535 + error = -EPERM;
69536 + }
69537 + break;
69538 + case GR_SPROLE:
69539 + case GR_SPROLEPAM:
69540 + if (unlikely(!gr_acl_is_enabled())) {
69541 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
69542 + error = -EAGAIN;
69543 + break;
69544 + }
69545 +
69546 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
69547 + current->role->expires = 0;
69548 + current->role->auth_attempts = 0;
69549 + }
69550 +
69551 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
69552 + time_after(current->role->expires, get_seconds())) {
69553 + error = -EBUSY;
69554 + goto out;
69555 + }
69556 +
69557 + if (lookup_special_role_auth
69558 + (gr_usermode.mode, gr_usermode.sp_role, &sprole_salt, &sprole_sum)
69559 + && ((!sprole_salt && !sprole_sum)
69560 + || !(chkpw(&gr_usermode, sprole_salt, sprole_sum)))) {
69561 + char *p = "";
69562 + assign_special_role(gr_usermode.sp_role);
69563 + read_lock(&tasklist_lock);
69564 + if (current->real_parent)
69565 + p = current->real_parent->role->rolename;
69566 + read_unlock(&tasklist_lock);
69567 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
69568 + p, acl_sp_role_value);
69569 + } else {
69570 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode.sp_role);
69571 + error = -EPERM;
69572 + if(!(current->role->auth_attempts++))
69573 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
69574 +
69575 + goto out;
69576 + }
69577 + break;
69578 + case GR_UNSPROLE:
69579 + if (unlikely(!gr_acl_is_enabled())) {
69580 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
69581 + error = -EAGAIN;
69582 + break;
69583 + }
69584 +
69585 + if (current->role->roletype & GR_ROLE_SPECIAL) {
69586 + char *p = "";
69587 + int i = 0;
69588 +
69589 + read_lock(&tasklist_lock);
69590 + if (current->real_parent) {
69591 + p = current->real_parent->role->rolename;
69592 + i = current->real_parent->acl_role_id;
69593 + }
69594 + read_unlock(&tasklist_lock);
69595 +
69596 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
69597 + gr_set_acls(1);
69598 + } else {
69599 + error = -EPERM;
69600 + goto out;
69601 + }
69602 + break;
69603 + default:
69604 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode.mode);
69605 + error = -EINVAL;
69606 + break;
69607 + }
69608 +
69609 + if (error != -EPERM)
69610 + goto out;
69611 +
69612 + if(!(gr_auth_attempts++))
69613 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
69614 +
69615 + out:
69616 + mutex_unlock(&gr_dev_mutex);
69617 +
69618 + if (!error)
69619 + error = req_count;
69620 +
69621 + return error;
69622 +}
69623 +
69624 +int
69625 +gr_set_acls(const int type)
69626 +{
69627 + struct task_struct *task, *task2;
69628 + struct acl_role_label *role = current->role;
69629 + struct acl_subject_label *subj;
69630 + __u16 acl_role_id = current->acl_role_id;
69631 + const struct cred *cred;
69632 + int ret;
69633 +
69634 + rcu_read_lock();
69635 + read_lock(&tasklist_lock);
69636 + read_lock(&grsec_exec_file_lock);
69637 + do_each_thread(task2, task) {
69638 + /* check to see if we're called from the exit handler,
69639 + if so, only replace ACLs that have inherited the admin
69640 + ACL */
69641 +
69642 + if (type && (task->role != role ||
69643 + task->acl_role_id != acl_role_id))
69644 + continue;
69645 +
69646 + task->acl_role_id = 0;
69647 + task->acl_sp_role = 0;
69648 + task->inherited = 0;
69649 +
69650 + if (task->exec_file) {
69651 + cred = __task_cred(task);
69652 + task->role = __lookup_acl_role_label(polstate, task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
69653 + subj = __gr_get_subject_for_task(polstate, task, NULL);
69654 + if (subj == NULL) {
69655 + ret = -EINVAL;
69656 + read_unlock(&grsec_exec_file_lock);
69657 + read_unlock(&tasklist_lock);
69658 + rcu_read_unlock();
69659 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
69660 + return ret;
69661 + }
69662 + __gr_apply_subject_to_task(polstate, task, subj);
69663 + } else {
69664 + // it's a kernel process
69665 + task->role = polstate->kernel_role;
69666 + task->acl = polstate->kernel_role->root_label;
69667 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
69668 + task->acl->mode &= ~GR_PROCFIND;
69669 +#endif
69670 + }
69671 + } while_each_thread(task2, task);
69672 + read_unlock(&grsec_exec_file_lock);
69673 + read_unlock(&tasklist_lock);
69674 + rcu_read_unlock();
69675 +
69676 + return 0;
69677 +}
69678 diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
69679 new file mode 100644
69680 index 0000000..39645c9
69681 --- /dev/null
69682 +++ b/grsecurity/gracl_res.c
69683 @@ -0,0 +1,68 @@
69684 +#include <linux/kernel.h>
69685 +#include <linux/sched.h>
69686 +#include <linux/gracl.h>
69687 +#include <linux/grinternal.h>
69688 +
69689 +static const char *restab_log[] = {
69690 + [RLIMIT_CPU] = "RLIMIT_CPU",
69691 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
69692 + [RLIMIT_DATA] = "RLIMIT_DATA",
69693 + [RLIMIT_STACK] = "RLIMIT_STACK",
69694 + [RLIMIT_CORE] = "RLIMIT_CORE",
69695 + [RLIMIT_RSS] = "RLIMIT_RSS",
69696 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
69697 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
69698 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
69699 + [RLIMIT_AS] = "RLIMIT_AS",
69700 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
69701 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
69702 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
69703 + [RLIMIT_NICE] = "RLIMIT_NICE",
69704 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
69705 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
69706 + [GR_CRASH_RES] = "RLIMIT_CRASH"
69707 +};
69708 +
69709 +void
69710 +gr_log_resource(const struct task_struct *task,
69711 + const int res, const unsigned long wanted, const int gt)
69712 +{
69713 + const struct cred *cred;
69714 + unsigned long rlim;
69715 +
69716 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
69717 + return;
69718 +
69719 + // not yet supported resource
69720 + if (unlikely(!restab_log[res]))
69721 + return;
69722 +
69723 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
69724 + rlim = task_rlimit_max(task, res);
69725 + else
69726 + rlim = task_rlimit(task, res);
69727 +
69728 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
69729 + return;
69730 +
69731 + rcu_read_lock();
69732 + cred = __task_cred(task);
69733 +
69734 + if (res == RLIMIT_NPROC &&
69735 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
69736 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
69737 + goto out_rcu_unlock;
69738 + else if (res == RLIMIT_MEMLOCK &&
69739 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
69740 + goto out_rcu_unlock;
69741 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
69742 + goto out_rcu_unlock;
69743 + rcu_read_unlock();
69744 +
69745 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
69746 +
69747 + return;
69748 +out_rcu_unlock:
69749 + rcu_read_unlock();
69750 + return;
69751 +}
69752 diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
69753 new file mode 100644
69754 index 0000000..2040e61
69755 --- /dev/null
69756 +++ b/grsecurity/gracl_segv.c
69757 @@ -0,0 +1,313 @@
69758 +#include <linux/kernel.h>
69759 +#include <linux/mm.h>
69760 +#include <asm/uaccess.h>
69761 +#include <asm/errno.h>
69762 +#include <asm/mman.h>
69763 +#include <net/sock.h>
69764 +#include <linux/file.h>
69765 +#include <linux/fs.h>
69766 +#include <linux/net.h>
69767 +#include <linux/in.h>
69768 +#include <linux/slab.h>
69769 +#include <linux/types.h>
69770 +#include <linux/sched.h>
69771 +#include <linux/timer.h>
69772 +#include <linux/gracl.h>
69773 +#include <linux/grsecurity.h>
69774 +#include <linux/grinternal.h>
69775 +#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69776 +#include <linux/magic.h>
69777 +#include <linux/pagemap.h>
69778 +#include "../fs/btrfs/async-thread.h"
69779 +#include "../fs/btrfs/ctree.h"
69780 +#include "../fs/btrfs/btrfs_inode.h"
69781 +#endif
69782 +
69783 +static struct crash_uid *uid_set;
69784 +static unsigned short uid_used;
69785 +static DEFINE_SPINLOCK(gr_uid_lock);
69786 +extern rwlock_t gr_inode_lock;
69787 +extern struct acl_subject_label *
69788 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
69789 + struct acl_role_label *role);
69790 +
69791 +static inline dev_t __get_dev(const struct dentry *dentry)
69792 +{
69793 +#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
69794 + if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
69795 + return BTRFS_I(dentry->d_inode)->root->anon_dev;
69796 + else
69797 +#endif
69798 + return dentry->d_sb->s_dev;
69799 +}
69800 +
69801 +int
69802 +gr_init_uidset(void)
69803 +{
69804 + uid_set =
69805 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
69806 + uid_used = 0;
69807 +
69808 + return uid_set ? 1 : 0;
69809 +}
69810 +
69811 +void
69812 +gr_free_uidset(void)
69813 +{
69814 + if (uid_set) {
69815 + struct crash_uid *tmpset;
69816 + spin_lock(&gr_uid_lock);
69817 + tmpset = uid_set;
69818 + uid_set = NULL;
69819 + uid_used = 0;
69820 + spin_unlock(&gr_uid_lock);
69821 + if (tmpset)
69822 + kfree(tmpset);
69823 + }
69824 +
69825 + return;
69826 +}
69827 +
69828 +int
69829 +gr_find_uid(const uid_t uid)
69830 +{
69831 + struct crash_uid *tmp = uid_set;
69832 + uid_t buid;
69833 + int low = 0, high = uid_used - 1, mid;
69834 +
69835 + while (high >= low) {
69836 + mid = (low + high) >> 1;
69837 + buid = tmp[mid].uid;
69838 + if (buid == uid)
69839 + return mid;
69840 + if (buid > uid)
69841 + high = mid - 1;
69842 + if (buid < uid)
69843 + low = mid + 1;
69844 + }
69845 +
69846 + return -1;
69847 +}
69848 +
69849 +static __inline__ void
69850 +gr_insertsort(void)
69851 +{
69852 + unsigned short i, j;
69853 + struct crash_uid index;
69854 +
69855 + for (i = 1; i < uid_used; i++) {
69856 + index = uid_set[i];
69857 + j = i;
69858 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
69859 + uid_set[j] = uid_set[j - 1];
69860 + j--;
69861 + }
69862 + uid_set[j] = index;
69863 + }
69864 +
69865 + return;
69866 +}
69867 +
69868 +static __inline__ void
69869 +gr_insert_uid(const kuid_t kuid, const unsigned long expires)
69870 +{
69871 + int loc;
69872 + uid_t uid = GR_GLOBAL_UID(kuid);
69873 +
69874 + if (uid_used == GR_UIDTABLE_MAX)
69875 + return;
69876 +
69877 + loc = gr_find_uid(uid);
69878 +
69879 + if (loc >= 0) {
69880 + uid_set[loc].expires = expires;
69881 + return;
69882 + }
69883 +
69884 + uid_set[uid_used].uid = uid;
69885 + uid_set[uid_used].expires = expires;
69886 + uid_used++;
69887 +
69888 + gr_insertsort();
69889 +
69890 + return;
69891 +}
69892 +
69893 +void
69894 +gr_remove_uid(const unsigned short loc)
69895 +{
69896 + unsigned short i;
69897 +
69898 + for (i = loc + 1; i < uid_used; i++)
69899 + uid_set[i - 1] = uid_set[i];
69900 +
69901 + uid_used--;
69902 +
69903 + return;
69904 +}
69905 +
69906 +int
69907 +gr_check_crash_uid(const kuid_t kuid)
69908 +{
69909 + int loc;
69910 + int ret = 0;
69911 + uid_t uid;
69912 +
69913 + if (unlikely(!gr_acl_is_enabled()))
69914 + return 0;
69915 +
69916 + uid = GR_GLOBAL_UID(kuid);
69917 +
69918 + spin_lock(&gr_uid_lock);
69919 + loc = gr_find_uid(uid);
69920 +
69921 + if (loc < 0)
69922 + goto out_unlock;
69923 +
69924 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
69925 + gr_remove_uid(loc);
69926 + else
69927 + ret = 1;
69928 +
69929 +out_unlock:
69930 + spin_unlock(&gr_uid_lock);
69931 + return ret;
69932 +}
69933 +
69934 +static __inline__ int
69935 +proc_is_setxid(const struct cred *cred)
69936 +{
69937 + if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
69938 + !uid_eq(cred->uid, cred->fsuid))
69939 + return 1;
69940 + if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
69941 + !gid_eq(cred->gid, cred->fsgid))
69942 + return 1;
69943 +
69944 + return 0;
69945 +}
69946 +
69947 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
69948 +
69949 +void
69950 +gr_handle_crash(struct task_struct *task, const int sig)
69951 +{
69952 + struct acl_subject_label *curr;
69953 + struct task_struct *tsk, *tsk2;
69954 + const struct cred *cred;
69955 + const struct cred *cred2;
69956 +
69957 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
69958 + return;
69959 +
69960 + if (unlikely(!gr_acl_is_enabled()))
69961 + return;
69962 +
69963 + curr = task->acl;
69964 +
69965 + if (!(curr->resmask & (1U << GR_CRASH_RES)))
69966 + return;
69967 +
69968 + if (time_before_eq(curr->expires, get_seconds())) {
69969 + curr->expires = 0;
69970 + curr->crashes = 0;
69971 + }
69972 +
69973 + curr->crashes++;
69974 +
69975 + if (!curr->expires)
69976 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
69977 +
69978 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
69979 + time_after(curr->expires, get_seconds())) {
69980 + rcu_read_lock();
69981 + cred = __task_cred(task);
69982 + if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
69983 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
69984 + spin_lock(&gr_uid_lock);
69985 + gr_insert_uid(cred->uid, curr->expires);
69986 + spin_unlock(&gr_uid_lock);
69987 + curr->expires = 0;
69988 + curr->crashes = 0;
69989 + read_lock(&tasklist_lock);
69990 + do_each_thread(tsk2, tsk) {
69991 + cred2 = __task_cred(tsk);
69992 + if (tsk != task && uid_eq(cred2->uid, cred->uid))
69993 + gr_fake_force_sig(SIGKILL, tsk);
69994 + } while_each_thread(tsk2, tsk);
69995 + read_unlock(&tasklist_lock);
69996 + } else {
69997 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
69998 + read_lock(&tasklist_lock);
69999 + read_lock(&grsec_exec_file_lock);
70000 + do_each_thread(tsk2, tsk) {
70001 + if (likely(tsk != task)) {
70002 + // if this thread has the same subject as the one that triggered
70003 + // RES_CRASH and it's the same binary, kill it
70004 + if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
70005 + gr_fake_force_sig(SIGKILL, tsk);
70006 + }
70007 + } while_each_thread(tsk2, tsk);
70008 + read_unlock(&grsec_exec_file_lock);
70009 + read_unlock(&tasklist_lock);
70010 + }
70011 + rcu_read_unlock();
70012 + }
70013 +
70014 + return;
70015 +}
70016 +
70017 +int
70018 +gr_check_crash_exec(const struct file *filp)
70019 +{
70020 + struct acl_subject_label *curr;
70021 +
70022 + if (unlikely(!gr_acl_is_enabled()))
70023 + return 0;
70024 +
70025 + read_lock(&gr_inode_lock);
70026 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
70027 + __get_dev(filp->f_path.dentry),
70028 + current->role);
70029 + read_unlock(&gr_inode_lock);
70030 +
70031 + if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
70032 + (!curr->crashes && !curr->expires))
70033 + return 0;
70034 +
70035 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
70036 + time_after(curr->expires, get_seconds()))
70037 + return 1;
70038 + else if (time_before_eq(curr->expires, get_seconds())) {
70039 + curr->crashes = 0;
70040 + curr->expires = 0;
70041 + }
70042 +
70043 + return 0;
70044 +}
70045 +
70046 +void
70047 +gr_handle_alertkill(struct task_struct *task)
70048 +{
70049 + struct acl_subject_label *curracl;
70050 + __u32 curr_ip;
70051 + struct task_struct *p, *p2;
70052 +
70053 + if (unlikely(!gr_acl_is_enabled()))
70054 + return;
70055 +
70056 + curracl = task->acl;
70057 + curr_ip = task->signal->curr_ip;
70058 +
70059 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
70060 + read_lock(&tasklist_lock);
70061 + do_each_thread(p2, p) {
70062 + if (p->signal->curr_ip == curr_ip)
70063 + gr_fake_force_sig(SIGKILL, p);
70064 + } while_each_thread(p2, p);
70065 + read_unlock(&tasklist_lock);
70066 + } else if (curracl->mode & GR_KILLPROC)
70067 + gr_fake_force_sig(SIGKILL, task);
70068 +
70069 + return;
70070 +}
70071 diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
70072 new file mode 100644
70073 index 0000000..98011b0
70074 --- /dev/null
70075 +++ b/grsecurity/gracl_shm.c
70076 @@ -0,0 +1,40 @@
70077 +#include <linux/kernel.h>
70078 +#include <linux/mm.h>
70079 +#include <linux/sched.h>
70080 +#include <linux/file.h>
70081 +#include <linux/ipc.h>
70082 +#include <linux/gracl.h>
70083 +#include <linux/grsecurity.h>
70084 +#include <linux/grinternal.h>
70085 +
70086 +int
70087 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70088 + const time_t shm_createtime, const kuid_t cuid, const int shmid)
70089 +{
70090 + struct task_struct *task;
70091 +
70092 + if (!gr_acl_is_enabled())
70093 + return 1;
70094 +
70095 + rcu_read_lock();
70096 + read_lock(&tasklist_lock);
70097 +
70098 + task = find_task_by_vpid(shm_cprid);
70099 +
70100 + if (unlikely(!task))
70101 + task = find_task_by_vpid(shm_lapid);
70102 +
70103 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
70104 + (task_pid_nr(task) == shm_lapid)) &&
70105 + (task->acl->mode & GR_PROTSHM) &&
70106 + (task->acl != current->acl))) {
70107 + read_unlock(&tasklist_lock);
70108 + rcu_read_unlock();
70109 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
70110 + return 0;
70111 + }
70112 + read_unlock(&tasklist_lock);
70113 + rcu_read_unlock();
70114 +
70115 + return 1;
70116 +}
70117 diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
70118 new file mode 100644
70119 index 0000000..bc0be01
70120 --- /dev/null
70121 +++ b/grsecurity/grsec_chdir.c
70122 @@ -0,0 +1,19 @@
70123 +#include <linux/kernel.h>
70124 +#include <linux/sched.h>
70125 +#include <linux/fs.h>
70126 +#include <linux/file.h>
70127 +#include <linux/grsecurity.h>
70128 +#include <linux/grinternal.h>
70129 +
70130 +void
70131 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
70132 +{
70133 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
70134 + if ((grsec_enable_chdir && grsec_enable_group &&
70135 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
70136 + !grsec_enable_group)) {
70137 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
70138 + }
70139 +#endif
70140 + return;
70141 +}
70142 diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
70143 new file mode 100644
70144 index 0000000..e10b319
70145 --- /dev/null
70146 +++ b/grsecurity/grsec_chroot.c
70147 @@ -0,0 +1,370 @@
70148 +#include <linux/kernel.h>
70149 +#include <linux/module.h>
70150 +#include <linux/sched.h>
70151 +#include <linux/file.h>
70152 +#include <linux/fs.h>
70153 +#include <linux/mount.h>
70154 +#include <linux/types.h>
70155 +#include "../fs/mount.h"
70156 +#include <linux/grsecurity.h>
70157 +#include <linux/grinternal.h>
70158 +
70159 +#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
70160 +int gr_init_ran;
70161 +#endif
70162 +
70163 +void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
70164 +{
70165 +#ifdef CONFIG_GRKERNSEC
70166 + if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
70167 + path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
70168 +#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
70169 + && gr_init_ran
70170 +#endif
70171 + )
70172 + task->gr_is_chrooted = 1;
70173 + else {
70174 +#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
70175 + if (task_pid_nr(task) == 1 && !gr_init_ran)
70176 + gr_init_ran = 1;
70177 +#endif
70178 + task->gr_is_chrooted = 0;
70179 + }
70180 +
70181 + task->gr_chroot_dentry = path->dentry;
70182 +#endif
70183 + return;
70184 +}
70185 +
70186 +void gr_clear_chroot_entries(struct task_struct *task)
70187 +{
70188 +#ifdef CONFIG_GRKERNSEC
70189 + task->gr_is_chrooted = 0;
70190 + task->gr_chroot_dentry = NULL;
70191 +#endif
70192 + return;
70193 +}
70194 +
70195 +int
70196 +gr_handle_chroot_unix(const pid_t pid)
70197 +{
70198 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
70199 + struct task_struct *p;
70200 +
70201 + if (unlikely(!grsec_enable_chroot_unix))
70202 + return 1;
70203 +
70204 + if (likely(!proc_is_chrooted(current)))
70205 + return 1;
70206 +
70207 + rcu_read_lock();
70208 + read_lock(&tasklist_lock);
70209 + p = find_task_by_vpid_unrestricted(pid);
70210 + if (unlikely(p && !have_same_root(current, p))) {
70211 + read_unlock(&tasklist_lock);
70212 + rcu_read_unlock();
70213 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
70214 + return 0;
70215 + }
70216 + read_unlock(&tasklist_lock);
70217 + rcu_read_unlock();
70218 +#endif
70219 + return 1;
70220 +}
70221 +
70222 +int
70223 +gr_handle_chroot_nice(void)
70224 +{
70225 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
70226 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
70227 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
70228 + return -EPERM;
70229 + }
70230 +#endif
70231 + return 0;
70232 +}
70233 +
70234 +int
70235 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
70236 +{
70237 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
70238 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
70239 + && proc_is_chrooted(current)) {
70240 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
70241 + return -EACCES;
70242 + }
70243 +#endif
70244 + return 0;
70245 +}
70246 +
70247 +int
70248 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
70249 +{
70250 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
70251 + struct task_struct *p;
70252 + int ret = 0;
70253 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
70254 + return ret;
70255 +
70256 + read_lock(&tasklist_lock);
70257 + do_each_pid_task(pid, type, p) {
70258 + if (!have_same_root(current, p)) {
70259 + ret = 1;
70260 + goto out;
70261 + }
70262 + } while_each_pid_task(pid, type, p);
70263 +out:
70264 + read_unlock(&tasklist_lock);
70265 + return ret;
70266 +#endif
70267 + return 0;
70268 +}
70269 +
70270 +int
70271 +gr_pid_is_chrooted(struct task_struct *p)
70272 +{
70273 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
70274 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
70275 + return 0;
70276 +
70277 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
70278 + !have_same_root(current, p)) {
70279 + return 1;
70280 + }
70281 +#endif
70282 + return 0;
70283 +}
70284 +
70285 +EXPORT_SYMBOL(gr_pid_is_chrooted);
70286 +
70287 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
70288 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
70289 +{
70290 + struct path path, currentroot;
70291 + int ret = 0;
70292 +
70293 + path.dentry = (struct dentry *)u_dentry;
70294 + path.mnt = (struct vfsmount *)u_mnt;
70295 + get_fs_root(current->fs, &currentroot);
70296 + if (path_is_under(&path, &currentroot))
70297 + ret = 1;
70298 + path_put(&currentroot);
70299 +
70300 + return ret;
70301 +}
70302 +#endif
70303 +
70304 +int
70305 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
70306 +{
70307 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
70308 + if (!grsec_enable_chroot_fchdir)
70309 + return 1;
70310 +
70311 + if (!proc_is_chrooted(current))
70312 + return 1;
70313 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
70314 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
70315 + return 0;
70316 + }
70317 +#endif
70318 + return 1;
70319 +}
70320 +
70321 +int
70322 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70323 + const time_t shm_createtime)
70324 +{
70325 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
70326 + struct task_struct *p;
70327 + time_t starttime;
70328 +
70329 + if (unlikely(!grsec_enable_chroot_shmat))
70330 + return 1;
70331 +
70332 + if (likely(!proc_is_chrooted(current)))
70333 + return 1;
70334 +
70335 + rcu_read_lock();
70336 + read_lock(&tasklist_lock);
70337 +
70338 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
70339 + starttime = p->start_time.tv_sec;
70340 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
70341 + if (have_same_root(current, p)) {
70342 + goto allow;
70343 + } else {
70344 + read_unlock(&tasklist_lock);
70345 + rcu_read_unlock();
70346 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
70347 + return 0;
70348 + }
70349 + }
70350 + /* creator exited, pid reuse, fall through to next check */
70351 + }
70352 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
70353 + if (unlikely(!have_same_root(current, p))) {
70354 + read_unlock(&tasklist_lock);
70355 + rcu_read_unlock();
70356 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
70357 + return 0;
70358 + }
70359 + }
70360 +
70361 +allow:
70362 + read_unlock(&tasklist_lock);
70363 + rcu_read_unlock();
70364 +#endif
70365 + return 1;
70366 +}
70367 +
70368 +void
70369 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
70370 +{
70371 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
70372 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
70373 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
70374 +#endif
70375 + return;
70376 +}
70377 +
70378 +int
70379 +gr_handle_chroot_mknod(const struct dentry *dentry,
70380 + const struct vfsmount *mnt, const int mode)
70381 +{
70382 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
70383 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
70384 + proc_is_chrooted(current)) {
70385 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
70386 + return -EPERM;
70387 + }
70388 +#endif
70389 + return 0;
70390 +}
70391 +
70392 +int
70393 +gr_handle_chroot_mount(const struct dentry *dentry,
70394 + const struct vfsmount *mnt, const char *dev_name)
70395 +{
70396 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
70397 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
70398 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
70399 + return -EPERM;
70400 + }
70401 +#endif
70402 + return 0;
70403 +}
70404 +
70405 +int
70406 +gr_handle_chroot_pivot(void)
70407 +{
70408 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
70409 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
70410 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
70411 + return -EPERM;
70412 + }
70413 +#endif
70414 + return 0;
70415 +}
70416 +
70417 +int
70418 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
70419 +{
70420 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
70421 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
70422 + !gr_is_outside_chroot(dentry, mnt)) {
70423 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
70424 + return -EPERM;
70425 + }
70426 +#endif
70427 + return 0;
70428 +}
70429 +
70430 +extern const char *captab_log[];
70431 +extern int captab_log_entries;
70432 +
70433 +int
70434 +gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
70435 +{
70436 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
70437 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
70438 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
70439 + if (cap_raised(chroot_caps, cap)) {
70440 + if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
70441 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
70442 + }
70443 + return 0;
70444 + }
70445 + }
70446 +#endif
70447 + return 1;
70448 +}
70449 +
70450 +int
70451 +gr_chroot_is_capable(const int cap)
70452 +{
70453 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
70454 + return gr_task_chroot_is_capable(current, current_cred(), cap);
70455 +#endif
70456 + return 1;
70457 +}
70458 +
70459 +int
70460 +gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
70461 +{
70462 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
70463 + if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
70464 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
70465 + if (cap_raised(chroot_caps, cap)) {
70466 + return 0;
70467 + }
70468 + }
70469 +#endif
70470 + return 1;
70471 +}
70472 +
70473 +int
70474 +gr_chroot_is_capable_nolog(const int cap)
70475 +{
70476 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
70477 + return gr_task_chroot_is_capable_nolog(current, cap);
70478 +#endif
70479 + return 1;
70480 +}
70481 +
70482 +int
70483 +gr_handle_chroot_sysctl(const int op)
70484 +{
70485 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
70486 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
70487 + proc_is_chrooted(current))
70488 + return -EACCES;
70489 +#endif
70490 + return 0;
70491 +}
70492 +
70493 +void
70494 +gr_handle_chroot_chdir(const struct path *path)
70495 +{
70496 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
70497 + if (grsec_enable_chroot_chdir)
70498 + set_fs_pwd(current->fs, path);
70499 +#endif
70500 + return;
70501 +}
70502 +
70503 +int
70504 +gr_handle_chroot_chmod(const struct dentry *dentry,
70505 + const struct vfsmount *mnt, const int mode)
70506 +{
70507 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
70508 + /* allow chmod +s on directories, but not files */
70509 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
70510 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
70511 + proc_is_chrooted(current)) {
70512 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
70513 + return -EPERM;
70514 + }
70515 +#endif
70516 + return 0;
70517 +}
70518 diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
70519 new file mode 100644
70520 index 0000000..52b3e30
70521 --- /dev/null
70522 +++ b/grsecurity/grsec_disabled.c
70523 @@ -0,0 +1,433 @@
70524 +#include <linux/kernel.h>
70525 +#include <linux/module.h>
70526 +#include <linux/sched.h>
70527 +#include <linux/file.h>
70528 +#include <linux/fs.h>
70529 +#include <linux/kdev_t.h>
70530 +#include <linux/net.h>
70531 +#include <linux/in.h>
70532 +#include <linux/ip.h>
70533 +#include <linux/skbuff.h>
70534 +#include <linux/sysctl.h>
70535 +
70536 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
70537 +void
70538 +pax_set_initial_flags(struct linux_binprm *bprm)
70539 +{
70540 + return;
70541 +}
70542 +#endif
70543 +
70544 +#ifdef CONFIG_SYSCTL
70545 +__u32
70546 +gr_handle_sysctl(const struct ctl_table * table, const int op)
70547 +{
70548 + return 0;
70549 +}
70550 +#endif
70551 +
70552 +#ifdef CONFIG_TASKSTATS
70553 +int gr_is_taskstats_denied(int pid)
70554 +{
70555 + return 0;
70556 +}
70557 +#endif
70558 +
70559 +int
70560 +gr_acl_is_enabled(void)
70561 +{
70562 + return 0;
70563 +}
70564 +
70565 +void
70566 +gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
70567 +{
70568 + return;
70569 +}
70570 +
70571 +int
70572 +gr_handle_rawio(const struct inode *inode)
70573 +{
70574 + return 0;
70575 +}
70576 +
70577 +void
70578 +gr_acl_handle_psacct(struct task_struct *task, const long code)
70579 +{
70580 + return;
70581 +}
70582 +
70583 +int
70584 +gr_handle_ptrace(struct task_struct *task, const long request)
70585 +{
70586 + return 0;
70587 +}
70588 +
70589 +int
70590 +gr_handle_proc_ptrace(struct task_struct *task)
70591 +{
70592 + return 0;
70593 +}
70594 +
70595 +int
70596 +gr_set_acls(const int type)
70597 +{
70598 + return 0;
70599 +}
70600 +
70601 +int
70602 +gr_check_hidden_task(const struct task_struct *tsk)
70603 +{
70604 + return 0;
70605 +}
70606 +
70607 +int
70608 +gr_check_protected_task(const struct task_struct *task)
70609 +{
70610 + return 0;
70611 +}
70612 +
70613 +int
70614 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
70615 +{
70616 + return 0;
70617 +}
70618 +
70619 +void
70620 +gr_copy_label(struct task_struct *tsk)
70621 +{
70622 + return;
70623 +}
70624 +
70625 +void
70626 +gr_set_pax_flags(struct task_struct *task)
70627 +{
70628 + return;
70629 +}
70630 +
70631 +int
70632 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
70633 + const int unsafe_share)
70634 +{
70635 + return 0;
70636 +}
70637 +
70638 +void
70639 +gr_handle_delete(const ino_t ino, const dev_t dev)
70640 +{
70641 + return;
70642 +}
70643 +
70644 +void
70645 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
70646 +{
70647 + return;
70648 +}
70649 +
70650 +void
70651 +gr_handle_crash(struct task_struct *task, const int sig)
70652 +{
70653 + return;
70654 +}
70655 +
70656 +int
70657 +gr_check_crash_exec(const struct file *filp)
70658 +{
70659 + return 0;
70660 +}
70661 +
70662 +int
70663 +gr_check_crash_uid(const kuid_t uid)
70664 +{
70665 + return 0;
70666 +}
70667 +
70668 +void
70669 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
70670 + struct dentry *old_dentry,
70671 + struct dentry *new_dentry,
70672 + struct vfsmount *mnt, const __u8 replace)
70673 +{
70674 + return;
70675 +}
70676 +
70677 +int
70678 +gr_search_socket(const int family, const int type, const int protocol)
70679 +{
70680 + return 1;
70681 +}
70682 +
70683 +int
70684 +gr_search_connectbind(const int mode, const struct socket *sock,
70685 + const struct sockaddr_in *addr)
70686 +{
70687 + return 0;
70688 +}
70689 +
70690 +void
70691 +gr_handle_alertkill(struct task_struct *task)
70692 +{
70693 + return;
70694 +}
70695 +
70696 +__u32
70697 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
70698 +{
70699 + return 1;
70700 +}
70701 +
70702 +__u32
70703 +gr_acl_handle_hidden_file(const struct dentry * dentry,
70704 + const struct vfsmount * mnt)
70705 +{
70706 + return 1;
70707 +}
70708 +
70709 +__u32
70710 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
70711 + int acc_mode)
70712 +{
70713 + return 1;
70714 +}
70715 +
70716 +__u32
70717 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
70718 +{
70719 + return 1;
70720 +}
70721 +
70722 +__u32
70723 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
70724 +{
70725 + return 1;
70726 +}
70727 +
70728 +int
70729 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
70730 + unsigned int *vm_flags)
70731 +{
70732 + return 1;
70733 +}
70734 +
70735 +__u32
70736 +gr_acl_handle_truncate(const struct dentry * dentry,
70737 + const struct vfsmount * mnt)
70738 +{
70739 + return 1;
70740 +}
70741 +
70742 +__u32
70743 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
70744 +{
70745 + return 1;
70746 +}
70747 +
70748 +__u32
70749 +gr_acl_handle_access(const struct dentry * dentry,
70750 + const struct vfsmount * mnt, const int fmode)
70751 +{
70752 + return 1;
70753 +}
70754 +
70755 +__u32
70756 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
70757 + umode_t *mode)
70758 +{
70759 + return 1;
70760 +}
70761 +
70762 +__u32
70763 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
70764 +{
70765 + return 1;
70766 +}
70767 +
70768 +__u32
70769 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
70770 +{
70771 + return 1;
70772 +}
70773 +
70774 +__u32
70775 +gr_acl_handle_removexattr(const struct dentry * dentry, const struct vfsmount * mnt)
70776 +{
70777 + return 1;
70778 +}
70779 +
70780 +void
70781 +grsecurity_init(void)
70782 +{
70783 + return;
70784 +}
70785 +
70786 +umode_t gr_acl_umask(void)
70787 +{
70788 + return 0;
70789 +}
70790 +
70791 +__u32
70792 +gr_acl_handle_mknod(const struct dentry * new_dentry,
70793 + const struct dentry * parent_dentry,
70794 + const struct vfsmount * parent_mnt,
70795 + const int mode)
70796 +{
70797 + return 1;
70798 +}
70799 +
70800 +__u32
70801 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
70802 + const struct dentry * parent_dentry,
70803 + const struct vfsmount * parent_mnt)
70804 +{
70805 + return 1;
70806 +}
70807 +
70808 +__u32
70809 +gr_acl_handle_symlink(const struct dentry * new_dentry,
70810 + const struct dentry * parent_dentry,
70811 + const struct vfsmount * parent_mnt, const struct filename *from)
70812 +{
70813 + return 1;
70814 +}
70815 +
70816 +__u32
70817 +gr_acl_handle_link(const struct dentry * new_dentry,
70818 + const struct dentry * parent_dentry,
70819 + const struct vfsmount * parent_mnt,
70820 + const struct dentry * old_dentry,
70821 + const struct vfsmount * old_mnt, const struct filename *to)
70822 +{
70823 + return 1;
70824 +}
70825 +
70826 +int
70827 +gr_acl_handle_rename(const struct dentry *new_dentry,
70828 + const struct dentry *parent_dentry,
70829 + const struct vfsmount *parent_mnt,
70830 + const struct dentry *old_dentry,
70831 + const struct inode *old_parent_inode,
70832 + const struct vfsmount *old_mnt, const struct filename *newname)
70833 +{
70834 + return 0;
70835 +}
70836 +
70837 +int
70838 +gr_acl_handle_filldir(const struct file *file, const char *name,
70839 + const int namelen, const ino_t ino)
70840 +{
70841 + return 1;
70842 +}
70843 +
70844 +int
70845 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70846 + const time_t shm_createtime, const kuid_t cuid, const int shmid)
70847 +{
70848 + return 1;
70849 +}
70850 +
70851 +int
70852 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
70853 +{
70854 + return 0;
70855 +}
70856 +
70857 +int
70858 +gr_search_accept(const struct socket *sock)
70859 +{
70860 + return 0;
70861 +}
70862 +
70863 +int
70864 +gr_search_listen(const struct socket *sock)
70865 +{
70866 + return 0;
70867 +}
70868 +
70869 +int
70870 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
70871 +{
70872 + return 0;
70873 +}
70874 +
70875 +__u32
70876 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
70877 +{
70878 + return 1;
70879 +}
70880 +
70881 +__u32
70882 +gr_acl_handle_creat(const struct dentry * dentry,
70883 + const struct dentry * p_dentry,
70884 + const struct vfsmount * p_mnt, int open_flags, int acc_mode,
70885 + const int imode)
70886 +{
70887 + return 1;
70888 +}
70889 +
70890 +void
70891 +gr_acl_handle_exit(void)
70892 +{
70893 + return;
70894 +}
70895 +
70896 +int
70897 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
70898 +{
70899 + return 1;
70900 +}
70901 +
70902 +void
70903 +gr_set_role_label(const kuid_t uid, const kgid_t gid)
70904 +{
70905 + return;
70906 +}
70907 +
70908 +int
70909 +gr_acl_handle_procpidmem(const struct task_struct *task)
70910 +{
70911 + return 0;
70912 +}
70913 +
70914 +int
70915 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
70916 +{
70917 + return 0;
70918 +}
70919 +
70920 +int
70921 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
70922 +{
70923 + return 0;
70924 +}
70925 +
70926 +int
70927 +gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
70928 +{
70929 + return 0;
70930 +}
70931 +
70932 +int
70933 +gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
70934 +{
70935 + return 0;
70936 +}
70937 +
70938 +int gr_acl_enable_at_secure(void)
70939 +{
70940 + return 0;
70941 +}
70942 +
70943 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
70944 +{
70945 + return dentry->d_sb->s_dev;
70946 +}
70947 +
70948 +void gr_put_exec_file(struct task_struct *task)
70949 +{
70950 + return;
70951 +}
70952 +
70953 +#ifdef CONFIG_SECURITY
70954 +EXPORT_SYMBOL(gr_check_user_change);
70955 +EXPORT_SYMBOL(gr_check_group_change);
70956 +#endif
70957 diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
70958 new file mode 100644
70959 index 0000000..387032b
70960 --- /dev/null
70961 +++ b/grsecurity/grsec_exec.c
70962 @@ -0,0 +1,187 @@
70963 +#include <linux/kernel.h>
70964 +#include <linux/sched.h>
70965 +#include <linux/file.h>
70966 +#include <linux/binfmts.h>
70967 +#include <linux/fs.h>
70968 +#include <linux/types.h>
70969 +#include <linux/grdefs.h>
70970 +#include <linux/grsecurity.h>
70971 +#include <linux/grinternal.h>
70972 +#include <linux/capability.h>
70973 +#include <linux/module.h>
70974 +#include <linux/compat.h>
70975 +
70976 +#include <asm/uaccess.h>
70977 +
70978 +#ifdef CONFIG_GRKERNSEC_EXECLOG
70979 +static char gr_exec_arg_buf[132];
70980 +static DEFINE_MUTEX(gr_exec_arg_mutex);
70981 +#endif
70982 +
70983 +struct user_arg_ptr {
70984 +#ifdef CONFIG_COMPAT
70985 + bool is_compat;
70986 +#endif
70987 + union {
70988 + const char __user *const __user *native;
70989 +#ifdef CONFIG_COMPAT
70990 + const compat_uptr_t __user *compat;
70991 +#endif
70992 + } ptr;
70993 +};
70994 +
70995 +extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
70996 +
70997 +void
70998 +gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
70999 +{
71000 +#ifdef CONFIG_GRKERNSEC_EXECLOG
71001 + char *grarg = gr_exec_arg_buf;
71002 + unsigned int i, x, execlen = 0;
71003 + char c;
71004 +
71005 + if (!((grsec_enable_execlog && grsec_enable_group &&
71006 + in_group_p(grsec_audit_gid))
71007 + || (grsec_enable_execlog && !grsec_enable_group)))
71008 + return;
71009 +
71010 + mutex_lock(&gr_exec_arg_mutex);
71011 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
71012 +
71013 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
71014 + const char __user *p;
71015 + unsigned int len;
71016 +
71017 + p = get_user_arg_ptr(argv, i);
71018 + if (IS_ERR(p))
71019 + goto log;
71020 +
71021 + len = strnlen_user(p, 128 - execlen);
71022 + if (len > 128 - execlen)
71023 + len = 128 - execlen;
71024 + else if (len > 0)
71025 + len--;
71026 + if (copy_from_user(grarg + execlen, p, len))
71027 + goto log;
71028 +
71029 + /* rewrite unprintable characters */
71030 + for (x = 0; x < len; x++) {
71031 + c = *(grarg + execlen + x);
71032 + if (c < 32 || c > 126)
71033 + *(grarg + execlen + x) = ' ';
71034 + }
71035 +
71036 + execlen += len;
71037 + *(grarg + execlen) = ' ';
71038 + *(grarg + execlen + 1) = '\0';
71039 + execlen++;
71040 + }
71041 +
71042 + log:
71043 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
71044 + bprm->file->f_path.mnt, grarg);
71045 + mutex_unlock(&gr_exec_arg_mutex);
71046 +#endif
71047 + return;
71048 +}
71049 +
71050 +#ifdef CONFIG_GRKERNSEC
71051 +extern int gr_acl_is_capable(const int cap);
71052 +extern int gr_acl_is_capable_nolog(const int cap);
71053 +extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
71054 +extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
71055 +extern int gr_chroot_is_capable(const int cap);
71056 +extern int gr_chroot_is_capable_nolog(const int cap);
71057 +extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
71058 +extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
71059 +#endif
71060 +
71061 +const char *captab_log[] = {
71062 + "CAP_CHOWN",
71063 + "CAP_DAC_OVERRIDE",
71064 + "CAP_DAC_READ_SEARCH",
71065 + "CAP_FOWNER",
71066 + "CAP_FSETID",
71067 + "CAP_KILL",
71068 + "CAP_SETGID",
71069 + "CAP_SETUID",
71070 + "CAP_SETPCAP",
71071 + "CAP_LINUX_IMMUTABLE",
71072 + "CAP_NET_BIND_SERVICE",
71073 + "CAP_NET_BROADCAST",
71074 + "CAP_NET_ADMIN",
71075 + "CAP_NET_RAW",
71076 + "CAP_IPC_LOCK",
71077 + "CAP_IPC_OWNER",
71078 + "CAP_SYS_MODULE",
71079 + "CAP_SYS_RAWIO",
71080 + "CAP_SYS_CHROOT",
71081 + "CAP_SYS_PTRACE",
71082 + "CAP_SYS_PACCT",
71083 + "CAP_SYS_ADMIN",
71084 + "CAP_SYS_BOOT",
71085 + "CAP_SYS_NICE",
71086 + "CAP_SYS_RESOURCE",
71087 + "CAP_SYS_TIME",
71088 + "CAP_SYS_TTY_CONFIG",
71089 + "CAP_MKNOD",
71090 + "CAP_LEASE",
71091 + "CAP_AUDIT_WRITE",
71092 + "CAP_AUDIT_CONTROL",
71093 + "CAP_SETFCAP",
71094 + "CAP_MAC_OVERRIDE",
71095 + "CAP_MAC_ADMIN",
71096 + "CAP_SYSLOG",
71097 + "CAP_WAKE_ALARM"
71098 +};
71099 +
71100 +int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
71101 +
71102 +int gr_is_capable(const int cap)
71103 +{
71104 +#ifdef CONFIG_GRKERNSEC
71105 + if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
71106 + return 1;
71107 + return 0;
71108 +#else
71109 + return 1;
71110 +#endif
71111 +}
71112 +
71113 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
71114 +{
71115 +#ifdef CONFIG_GRKERNSEC
71116 + if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
71117 + return 1;
71118 + return 0;
71119 +#else
71120 + return 1;
71121 +#endif
71122 +}
71123 +
71124 +int gr_is_capable_nolog(const int cap)
71125 +{
71126 +#ifdef CONFIG_GRKERNSEC
71127 + if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
71128 + return 1;
71129 + return 0;
71130 +#else
71131 + return 1;
71132 +#endif
71133 +}
71134 +
71135 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
71136 +{
71137 +#ifdef CONFIG_GRKERNSEC
71138 + if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
71139 + return 1;
71140 + return 0;
71141 +#else
71142 + return 1;
71143 +#endif
71144 +}
71145 +
71146 +EXPORT_SYMBOL(gr_is_capable);
71147 +EXPORT_SYMBOL(gr_is_capable_nolog);
71148 +EXPORT_SYMBOL(gr_task_is_capable);
71149 +EXPORT_SYMBOL(gr_task_is_capable_nolog);
71150 diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
71151 new file mode 100644
71152 index 0000000..06cc6ea
71153 --- /dev/null
71154 +++ b/grsecurity/grsec_fifo.c
71155 @@ -0,0 +1,24 @@
71156 +#include <linux/kernel.h>
71157 +#include <linux/sched.h>
71158 +#include <linux/fs.h>
71159 +#include <linux/file.h>
71160 +#include <linux/grinternal.h>
71161 +
71162 +int
71163 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
71164 + const struct dentry *dir, const int flag, const int acc_mode)
71165 +{
71166 +#ifdef CONFIG_GRKERNSEC_FIFO
71167 + const struct cred *cred = current_cred();
71168 +
71169 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
71170 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
71171 + !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
71172 + !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
71173 + if (!inode_permission(dentry->d_inode, acc_mode))
71174 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
71175 + return -EACCES;
71176 + }
71177 +#endif
71178 + return 0;
71179 +}
71180 diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
71181 new file mode 100644
71182 index 0000000..8ca18bf
71183 --- /dev/null
71184 +++ b/grsecurity/grsec_fork.c
71185 @@ -0,0 +1,23 @@
71186 +#include <linux/kernel.h>
71187 +#include <linux/sched.h>
71188 +#include <linux/grsecurity.h>
71189 +#include <linux/grinternal.h>
71190 +#include <linux/errno.h>
71191 +
71192 +void
71193 +gr_log_forkfail(const int retval)
71194 +{
71195 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
71196 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
71197 + switch (retval) {
71198 + case -EAGAIN:
71199 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
71200 + break;
71201 + case -ENOMEM:
71202 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
71203 + break;
71204 + }
71205 + }
71206 +#endif
71207 + return;
71208 +}
71209 diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
71210 new file mode 100644
71211 index 0000000..a88e901
71212 --- /dev/null
71213 +++ b/grsecurity/grsec_init.c
71214 @@ -0,0 +1,272 @@
71215 +#include <linux/kernel.h>
71216 +#include <linux/sched.h>
71217 +#include <linux/mm.h>
71218 +#include <linux/gracl.h>
71219 +#include <linux/slab.h>
71220 +#include <linux/vmalloc.h>
71221 +#include <linux/percpu.h>
71222 +#include <linux/module.h>
71223 +
71224 +int grsec_enable_ptrace_readexec;
71225 +int grsec_enable_setxid;
71226 +int grsec_enable_symlinkown;
71227 +kgid_t grsec_symlinkown_gid;
71228 +int grsec_enable_brute;
71229 +int grsec_enable_link;
71230 +int grsec_enable_dmesg;
71231 +int grsec_enable_harden_ptrace;
71232 +int grsec_enable_harden_ipc;
71233 +int grsec_enable_fifo;
71234 +int grsec_enable_execlog;
71235 +int grsec_enable_signal;
71236 +int grsec_enable_forkfail;
71237 +int grsec_enable_audit_ptrace;
71238 +int grsec_enable_time;
71239 +int grsec_enable_group;
71240 +kgid_t grsec_audit_gid;
71241 +int grsec_enable_chdir;
71242 +int grsec_enable_mount;
71243 +int grsec_enable_rofs;
71244 +int grsec_deny_new_usb;
71245 +int grsec_enable_chroot_findtask;
71246 +int grsec_enable_chroot_mount;
71247 +int grsec_enable_chroot_shmat;
71248 +int grsec_enable_chroot_fchdir;
71249 +int grsec_enable_chroot_double;
71250 +int grsec_enable_chroot_pivot;
71251 +int grsec_enable_chroot_chdir;
71252 +int grsec_enable_chroot_chmod;
71253 +int grsec_enable_chroot_mknod;
71254 +int grsec_enable_chroot_nice;
71255 +int grsec_enable_chroot_execlog;
71256 +int grsec_enable_chroot_caps;
71257 +int grsec_enable_chroot_sysctl;
71258 +int grsec_enable_chroot_unix;
71259 +int grsec_enable_tpe;
71260 +kgid_t grsec_tpe_gid;
71261 +int grsec_enable_blackhole;
71262 +#ifdef CONFIG_IPV6_MODULE
71263 +EXPORT_SYMBOL(grsec_enable_blackhole);
71264 +#endif
71265 +int grsec_lastack_retries;
71266 +int grsec_enable_tpe_all;
71267 +int grsec_enable_tpe_invert;
71268 +int grsec_enable_socket_all;
71269 +kgid_t grsec_socket_all_gid;
71270 +int grsec_enable_socket_client;
71271 +kgid_t grsec_socket_client_gid;
71272 +int grsec_enable_socket_server;
71273 +kgid_t grsec_socket_server_gid;
71274 +int grsec_resource_logging;
71275 +int grsec_disable_privio;
71276 +int grsec_enable_log_rwxmaps;
71277 +int grsec_lock;
71278 +
71279 +DEFINE_SPINLOCK(grsec_alert_lock);
71280 +unsigned long grsec_alert_wtime = 0;
71281 +unsigned long grsec_alert_fyet = 0;
71282 +
71283 +DEFINE_SPINLOCK(grsec_audit_lock);
71284 +
71285 +DEFINE_RWLOCK(grsec_exec_file_lock);
71286 +
71287 +char *gr_shared_page[4];
71288 +
71289 +char *gr_alert_log_fmt;
71290 +char *gr_audit_log_fmt;
71291 +char *gr_alert_log_buf;
71292 +char *gr_audit_log_buf;
71293 +
71294 +void __init
71295 +grsecurity_init(void)
71296 +{
71297 + int j;
71298 + /* create the per-cpu shared pages */
71299 +
71300 +#ifdef CONFIG_X86
71301 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
71302 +#endif
71303 +
71304 + for (j = 0; j < 4; j++) {
71305 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
71306 + if (gr_shared_page[j] == NULL) {
71307 + panic("Unable to allocate grsecurity shared page");
71308 + return;
71309 + }
71310 + }
71311 +
71312 + /* allocate log buffers */
71313 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
71314 + if (!gr_alert_log_fmt) {
71315 + panic("Unable to allocate grsecurity alert log format buffer");
71316 + return;
71317 + }
71318 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
71319 + if (!gr_audit_log_fmt) {
71320 + panic("Unable to allocate grsecurity audit log format buffer");
71321 + return;
71322 + }
71323 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
71324 + if (!gr_alert_log_buf) {
71325 + panic("Unable to allocate grsecurity alert log buffer");
71326 + return;
71327 + }
71328 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
71329 + if (!gr_audit_log_buf) {
71330 + panic("Unable to allocate grsecurity audit log buffer");
71331 + return;
71332 + }
71333 +
71334 +#ifdef CONFIG_GRKERNSEC_IO
71335 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
71336 + grsec_disable_privio = 1;
71337 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
71338 + grsec_disable_privio = 1;
71339 +#else
71340 + grsec_disable_privio = 0;
71341 +#endif
71342 +#endif
71343 +
71344 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
71345 + /* for backward compatibility, tpe_invert always defaults to on if
71346 + enabled in the kernel
71347 + */
71348 + grsec_enable_tpe_invert = 1;
71349 +#endif
71350 +
71351 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
71352 +#ifndef CONFIG_GRKERNSEC_SYSCTL
71353 + grsec_lock = 1;
71354 +#endif
71355 +
71356 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
71357 + grsec_enable_log_rwxmaps = 1;
71358 +#endif
71359 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
71360 + grsec_enable_group = 1;
71361 + grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
71362 +#endif
71363 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
71364 + grsec_enable_ptrace_readexec = 1;
71365 +#endif
71366 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
71367 + grsec_enable_chdir = 1;
71368 +#endif
71369 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
71370 + grsec_enable_harden_ptrace = 1;
71371 +#endif
71372 +#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
71373 + grsec_enable_harden_ipc = 1;
71374 +#endif
71375 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
71376 + grsec_enable_mount = 1;
71377 +#endif
71378 +#ifdef CONFIG_GRKERNSEC_LINK
71379 + grsec_enable_link = 1;
71380 +#endif
71381 +#ifdef CONFIG_GRKERNSEC_BRUTE
71382 + grsec_enable_brute = 1;
71383 +#endif
71384 +#ifdef CONFIG_GRKERNSEC_DMESG
71385 + grsec_enable_dmesg = 1;
71386 +#endif
71387 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
71388 + grsec_enable_blackhole = 1;
71389 + grsec_lastack_retries = 4;
71390 +#endif
71391 +#ifdef CONFIG_GRKERNSEC_FIFO
71392 + grsec_enable_fifo = 1;
71393 +#endif
71394 +#ifdef CONFIG_GRKERNSEC_EXECLOG
71395 + grsec_enable_execlog = 1;
71396 +#endif
71397 +#ifdef CONFIG_GRKERNSEC_SETXID
71398 + grsec_enable_setxid = 1;
71399 +#endif
71400 +#ifdef CONFIG_GRKERNSEC_SIGNAL
71401 + grsec_enable_signal = 1;
71402 +#endif
71403 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
71404 + grsec_enable_forkfail = 1;
71405 +#endif
71406 +#ifdef CONFIG_GRKERNSEC_TIME
71407 + grsec_enable_time = 1;
71408 +#endif
71409 +#ifdef CONFIG_GRKERNSEC_RESLOG
71410 + grsec_resource_logging = 1;
71411 +#endif
71412 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
71413 + grsec_enable_chroot_findtask = 1;
71414 +#endif
71415 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
71416 + grsec_enable_chroot_unix = 1;
71417 +#endif
71418 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
71419 + grsec_enable_chroot_mount = 1;
71420 +#endif
71421 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
71422 + grsec_enable_chroot_fchdir = 1;
71423 +#endif
71424 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
71425 + grsec_enable_chroot_shmat = 1;
71426 +#endif
71427 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
71428 + grsec_enable_audit_ptrace = 1;
71429 +#endif
71430 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
71431 + grsec_enable_chroot_double = 1;
71432 +#endif
71433 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
71434 + grsec_enable_chroot_pivot = 1;
71435 +#endif
71436 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
71437 + grsec_enable_chroot_chdir = 1;
71438 +#endif
71439 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
71440 + grsec_enable_chroot_chmod = 1;
71441 +#endif
71442 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
71443 + grsec_enable_chroot_mknod = 1;
71444 +#endif
71445 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
71446 + grsec_enable_chroot_nice = 1;
71447 +#endif
71448 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
71449 + grsec_enable_chroot_execlog = 1;
71450 +#endif
71451 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
71452 + grsec_enable_chroot_caps = 1;
71453 +#endif
71454 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
71455 + grsec_enable_chroot_sysctl = 1;
71456 +#endif
71457 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
71458 + grsec_enable_symlinkown = 1;
71459 + grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
71460 +#endif
71461 +#ifdef CONFIG_GRKERNSEC_TPE
71462 + grsec_enable_tpe = 1;
71463 + grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
71464 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
71465 + grsec_enable_tpe_all = 1;
71466 +#endif
71467 +#endif
71468 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
71469 + grsec_enable_socket_all = 1;
71470 + grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
71471 +#endif
71472 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
71473 + grsec_enable_socket_client = 1;
71474 + grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
71475 +#endif
71476 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
71477 + grsec_enable_socket_server = 1;
71478 + grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
71479 +#endif
71480 +#endif
71481 +#ifdef CONFIG_GRKERNSEC_DENYUSB_FORCE
71482 + grsec_deny_new_usb = 1;
71483 +#endif
71484 +
71485 + return;
71486 +}
71487 diff --git a/grsecurity/grsec_ipc.c b/grsecurity/grsec_ipc.c
71488 new file mode 100644
71489 index 0000000..f365de0
71490 --- /dev/null
71491 +++ b/grsecurity/grsec_ipc.c
71492 @@ -0,0 +1,22 @@
71493 +#include <linux/kernel.h>
71494 +#include <linux/mm.h>
71495 +#include <linux/sched.h>
71496 +#include <linux/file.h>
71497 +#include <linux/ipc.h>
71498 +#include <linux/ipc_namespace.h>
71499 +#include <linux/grsecurity.h>
71500 +#include <linux/grinternal.h>
71501 +
71502 +int
71503 +gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode)
71504 +{
71505 +#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
71506 + int write = (requested_mode & 00002);
71507 +
71508 + if (grsec_enable_harden_ipc && !(requested_mode & ~granted_mode & 0007) && !ns_capable_nolog(ns->user_ns, CAP_IPC_OWNER)) {
71509 + gr_log_str2_int(GR_DONT_AUDIT, GR_IPC_DENIED_MSG, write ? "write" : "read", write ? "writ" : "read", GR_GLOBAL_UID(ipcp->cuid));
71510 + return 0;
71511 + }
71512 +#endif
71513 + return 1;
71514 +}
71515 diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
71516 new file mode 100644
71517 index 0000000..5e05e20
71518 --- /dev/null
71519 +++ b/grsecurity/grsec_link.c
71520 @@ -0,0 +1,58 @@
71521 +#include <linux/kernel.h>
71522 +#include <linux/sched.h>
71523 +#include <linux/fs.h>
71524 +#include <linux/file.h>
71525 +#include <linux/grinternal.h>
71526 +
71527 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
71528 +{
71529 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
71530 + const struct inode *link_inode = link->dentry->d_inode;
71531 +
71532 + if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
71533 + /* ignore root-owned links, e.g. /proc/self */
71534 + gr_is_global_nonroot(link_inode->i_uid) && target &&
71535 + !uid_eq(link_inode->i_uid, target->i_uid)) {
71536 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
71537 + return 1;
71538 + }
71539 +#endif
71540 + return 0;
71541 +}
71542 +
71543 +int
71544 +gr_handle_follow_link(const struct inode *parent,
71545 + const struct inode *inode,
71546 + const struct dentry *dentry, const struct vfsmount *mnt)
71547 +{
71548 +#ifdef CONFIG_GRKERNSEC_LINK
71549 + const struct cred *cred = current_cred();
71550 +
71551 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
71552 + (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
71553 + (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
71554 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
71555 + return -EACCES;
71556 + }
71557 +#endif
71558 + return 0;
71559 +}
71560 +
71561 +int
71562 +gr_handle_hardlink(const struct dentry *dentry,
71563 + const struct vfsmount *mnt,
71564 + struct inode *inode, const int mode, const struct filename *to)
71565 +{
71566 +#ifdef CONFIG_GRKERNSEC_LINK
71567 + const struct cred *cred = current_cred();
71568 +
71569 + if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
71570 + (!S_ISREG(mode) || is_privileged_binary(dentry) ||
71571 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
71572 + !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
71573 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
71574 + return -EPERM;
71575 + }
71576 +#endif
71577 + return 0;
71578 +}
71579 diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
71580 new file mode 100644
71581 index 0000000..dbe0a6b
71582 --- /dev/null
71583 +++ b/grsecurity/grsec_log.c
71584 @@ -0,0 +1,341 @@
71585 +#include <linux/kernel.h>
71586 +#include <linux/sched.h>
71587 +#include <linux/file.h>
71588 +#include <linux/tty.h>
71589 +#include <linux/fs.h>
71590 +#include <linux/mm.h>
71591 +#include <linux/grinternal.h>
71592 +
71593 +#ifdef CONFIG_TREE_PREEMPT_RCU
71594 +#define DISABLE_PREEMPT() preempt_disable()
71595 +#define ENABLE_PREEMPT() preempt_enable()
71596 +#else
71597 +#define DISABLE_PREEMPT()
71598 +#define ENABLE_PREEMPT()
71599 +#endif
71600 +
71601 +#define BEGIN_LOCKS(x) \
71602 + DISABLE_PREEMPT(); \
71603 + rcu_read_lock(); \
71604 + read_lock(&tasklist_lock); \
71605 + read_lock(&grsec_exec_file_lock); \
71606 + if (x != GR_DO_AUDIT) \
71607 + spin_lock(&grsec_alert_lock); \
71608 + else \
71609 + spin_lock(&grsec_audit_lock)
71610 +
71611 +#define END_LOCKS(x) \
71612 + if (x != GR_DO_AUDIT) \
71613 + spin_unlock(&grsec_alert_lock); \
71614 + else \
71615 + spin_unlock(&grsec_audit_lock); \
71616 + read_unlock(&grsec_exec_file_lock); \
71617 + read_unlock(&tasklist_lock); \
71618 + rcu_read_unlock(); \
71619 + ENABLE_PREEMPT(); \
71620 + if (x == GR_DONT_AUDIT) \
71621 + gr_handle_alertkill(current)
71622 +
71623 +enum {
71624 + FLOODING,
71625 + NO_FLOODING
71626 +};
71627 +
71628 +extern char *gr_alert_log_fmt;
71629 +extern char *gr_audit_log_fmt;
71630 +extern char *gr_alert_log_buf;
71631 +extern char *gr_audit_log_buf;
71632 +
71633 +static int gr_log_start(int audit)
71634 +{
71635 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
71636 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
71637 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
71638 +#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
71639 + unsigned long curr_secs = get_seconds();
71640 +
71641 + if (audit == GR_DO_AUDIT)
71642 + goto set_fmt;
71643 +
71644 + if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
71645 + grsec_alert_wtime = curr_secs;
71646 + grsec_alert_fyet = 0;
71647 + } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
71648 + && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
71649 + grsec_alert_fyet++;
71650 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
71651 + grsec_alert_wtime = curr_secs;
71652 + grsec_alert_fyet++;
71653 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
71654 + return FLOODING;
71655 + }
71656 + else return FLOODING;
71657 +
71658 +set_fmt:
71659 +#endif
71660 + memset(buf, 0, PAGE_SIZE);
71661 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
71662 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
71663 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
71664 + } else if (current->signal->curr_ip) {
71665 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
71666 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
71667 + } else if (gr_acl_is_enabled()) {
71668 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
71669 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
71670 + } else {
71671 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
71672 + strcpy(buf, fmt);
71673 + }
71674 +
71675 + return NO_FLOODING;
71676 +}
71677 +
71678 +static void gr_log_middle(int audit, const char *msg, va_list ap)
71679 + __attribute__ ((format (printf, 2, 0)));
71680 +
71681 +static void gr_log_middle(int audit, const char *msg, va_list ap)
71682 +{
71683 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
71684 + unsigned int len = strlen(buf);
71685 +
71686 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
71687 +
71688 + return;
71689 +}
71690 +
71691 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
71692 + __attribute__ ((format (printf, 2, 3)));
71693 +
71694 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
71695 +{
71696 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
71697 + unsigned int len = strlen(buf);
71698 + va_list ap;
71699 +
71700 + va_start(ap, msg);
71701 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
71702 + va_end(ap);
71703 +
71704 + return;
71705 +}
71706 +
71707 +static void gr_log_end(int audit, int append_default)
71708 +{
71709 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
71710 + if (append_default) {
71711 + struct task_struct *task = current;
71712 + struct task_struct *parent = task->real_parent;
71713 + const struct cred *cred = __task_cred(task);
71714 + const struct cred *pcred = __task_cred(parent);
71715 + unsigned int len = strlen(buf);
71716 +
71717 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
71718 + }
71719 +
71720 + printk("%s\n", buf);
71721 +
71722 + return;
71723 +}
71724 +
71725 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
71726 +{
71727 + int logtype;
71728 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
71729 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
71730 + void *voidptr = NULL;
71731 + int num1 = 0, num2 = 0;
71732 + unsigned long ulong1 = 0, ulong2 = 0;
71733 + struct dentry *dentry = NULL;
71734 + struct vfsmount *mnt = NULL;
71735 + struct file *file = NULL;
71736 + struct task_struct *task = NULL;
71737 + struct vm_area_struct *vma = NULL;
71738 + const struct cred *cred, *pcred;
71739 + va_list ap;
71740 +
71741 + BEGIN_LOCKS(audit);
71742 + logtype = gr_log_start(audit);
71743 + if (logtype == FLOODING) {
71744 + END_LOCKS(audit);
71745 + return;
71746 + }
71747 + va_start(ap, argtypes);
71748 + switch (argtypes) {
71749 + case GR_TTYSNIFF:
71750 + task = va_arg(ap, struct task_struct *);
71751 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
71752 + break;
71753 + case GR_SYSCTL_HIDDEN:
71754 + str1 = va_arg(ap, char *);
71755 + gr_log_middle_varargs(audit, msg, result, str1);
71756 + break;
71757 + case GR_RBAC:
71758 + dentry = va_arg(ap, struct dentry *);
71759 + mnt = va_arg(ap, struct vfsmount *);
71760 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
71761 + break;
71762 + case GR_RBAC_STR:
71763 + dentry = va_arg(ap, struct dentry *);
71764 + mnt = va_arg(ap, struct vfsmount *);
71765 + str1 = va_arg(ap, char *);
71766 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
71767 + break;
71768 + case GR_STR_RBAC:
71769 + str1 = va_arg(ap, char *);
71770 + dentry = va_arg(ap, struct dentry *);
71771 + mnt = va_arg(ap, struct vfsmount *);
71772 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
71773 + break;
71774 + case GR_RBAC_MODE2:
71775 + dentry = va_arg(ap, struct dentry *);
71776 + mnt = va_arg(ap, struct vfsmount *);
71777 + str1 = va_arg(ap, char *);
71778 + str2 = va_arg(ap, char *);
71779 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
71780 + break;
71781 + case GR_RBAC_MODE3:
71782 + dentry = va_arg(ap, struct dentry *);
71783 + mnt = va_arg(ap, struct vfsmount *);
71784 + str1 = va_arg(ap, char *);
71785 + str2 = va_arg(ap, char *);
71786 + str3 = va_arg(ap, char *);
71787 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
71788 + break;
71789 + case GR_FILENAME:
71790 + dentry = va_arg(ap, struct dentry *);
71791 + mnt = va_arg(ap, struct vfsmount *);
71792 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
71793 + break;
71794 + case GR_STR_FILENAME:
71795 + str1 = va_arg(ap, char *);
71796 + dentry = va_arg(ap, struct dentry *);
71797 + mnt = va_arg(ap, struct vfsmount *);
71798 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
71799 + break;
71800 + case GR_FILENAME_STR:
71801 + dentry = va_arg(ap, struct dentry *);
71802 + mnt = va_arg(ap, struct vfsmount *);
71803 + str1 = va_arg(ap, char *);
71804 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
71805 + break;
71806 + case GR_FILENAME_TWO_INT:
71807 + dentry = va_arg(ap, struct dentry *);
71808 + mnt = va_arg(ap, struct vfsmount *);
71809 + num1 = va_arg(ap, int);
71810 + num2 = va_arg(ap, int);
71811 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
71812 + break;
71813 + case GR_FILENAME_TWO_INT_STR:
71814 + dentry = va_arg(ap, struct dentry *);
71815 + mnt = va_arg(ap, struct vfsmount *);
71816 + num1 = va_arg(ap, int);
71817 + num2 = va_arg(ap, int);
71818 + str1 = va_arg(ap, char *);
71819 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
71820 + break;
71821 + case GR_TEXTREL:
71822 + file = va_arg(ap, struct file *);
71823 + ulong1 = va_arg(ap, unsigned long);
71824 + ulong2 = va_arg(ap, unsigned long);
71825 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
71826 + break;
71827 + case GR_PTRACE:
71828 + task = va_arg(ap, struct task_struct *);
71829 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
71830 + break;
71831 + case GR_RESOURCE:
71832 + task = va_arg(ap, struct task_struct *);
71833 + cred = __task_cred(task);
71834 + pcred = __task_cred(task->real_parent);
71835 + ulong1 = va_arg(ap, unsigned long);
71836 + str1 = va_arg(ap, char *);
71837 + ulong2 = va_arg(ap, unsigned long);
71838 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
71839 + break;
71840 + case GR_CAP:
71841 + task = va_arg(ap, struct task_struct *);
71842 + cred = __task_cred(task);
71843 + pcred = __task_cred(task->real_parent);
71844 + str1 = va_arg(ap, char *);
71845 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
71846 + break;
71847 + case GR_SIG:
71848 + str1 = va_arg(ap, char *);
71849 + voidptr = va_arg(ap, void *);
71850 + gr_log_middle_varargs(audit, msg, str1, voidptr);
71851 + break;
71852 + case GR_SIG2:
71853 + task = va_arg(ap, struct task_struct *);
71854 + cred = __task_cred(task);
71855 + pcred = __task_cred(task->real_parent);
71856 + num1 = va_arg(ap, int);
71857 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
71858 + break;
71859 + case GR_CRASH1:
71860 + task = va_arg(ap, struct task_struct *);
71861 + cred = __task_cred(task);
71862 + pcred = __task_cred(task->real_parent);
71863 + ulong1 = va_arg(ap, unsigned long);
71864 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
71865 + break;
71866 + case GR_CRASH2:
71867 + task = va_arg(ap, struct task_struct *);
71868 + cred = __task_cred(task);
71869 + pcred = __task_cred(task->real_parent);
71870 + ulong1 = va_arg(ap, unsigned long);
71871 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
71872 + break;
71873 + case GR_RWXMAP:
71874 + file = va_arg(ap, struct file *);
71875 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
71876 + break;
71877 + case GR_RWXMAPVMA:
71878 + vma = va_arg(ap, struct vm_area_struct *);
71879 + if (vma->vm_file)
71880 + str1 = gr_to_filename(vma->vm_file->f_path.dentry, vma->vm_file->f_path.mnt);
71881 + else if (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP))
71882 + str1 = "<stack>";
71883 + else if (vma->vm_start <= current->mm->brk &&
71884 + vma->vm_end >= current->mm->start_brk)
71885 + str1 = "<heap>";
71886 + else
71887 + str1 = "<anonymous mapping>";
71888 + gr_log_middle_varargs(audit, msg, str1);
71889 + break;
71890 + case GR_PSACCT:
71891 + {
71892 + unsigned int wday, cday;
71893 + __u8 whr, chr;
71894 + __u8 wmin, cmin;
71895 + __u8 wsec, csec;
71896 + char cur_tty[64] = { 0 };
71897 + char parent_tty[64] = { 0 };
71898 +
71899 + task = va_arg(ap, struct task_struct *);
71900 + wday = va_arg(ap, unsigned int);
71901 + cday = va_arg(ap, unsigned int);
71902 + whr = va_arg(ap, int);
71903 + chr = va_arg(ap, int);
71904 + wmin = va_arg(ap, int);
71905 + cmin = va_arg(ap, int);
71906 + wsec = va_arg(ap, int);
71907 + csec = va_arg(ap, int);
71908 + ulong1 = va_arg(ap, unsigned long);
71909 + cred = __task_cred(task);
71910 + pcred = __task_cred(task->real_parent);
71911 +
71912 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
71913 + }
71914 + break;
71915 + default:
71916 + gr_log_middle(audit, msg, ap);
71917 + }
71918 + va_end(ap);
71919 + // these don't need DEFAULTSECARGS printed on the end
71920 + if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
71921 + gr_log_end(audit, 0);
71922 + else
71923 + gr_log_end(audit, 1);
71924 + END_LOCKS(audit);
71925 +}
71926 diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
71927 new file mode 100644
71928 index 0000000..f536303
71929 --- /dev/null
71930 +++ b/grsecurity/grsec_mem.c
71931 @@ -0,0 +1,40 @@
71932 +#include <linux/kernel.h>
71933 +#include <linux/sched.h>
71934 +#include <linux/mm.h>
71935 +#include <linux/mman.h>
71936 +#include <linux/grinternal.h>
71937 +
71938 +void
71939 +gr_handle_ioperm(void)
71940 +{
71941 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
71942 + return;
71943 +}
71944 +
71945 +void
71946 +gr_handle_iopl(void)
71947 +{
71948 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
71949 + return;
71950 +}
71951 +
71952 +void
71953 +gr_handle_mem_readwrite(u64 from, u64 to)
71954 +{
71955 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
71956 + return;
71957 +}
71958 +
71959 +void
71960 +gr_handle_vm86(void)
71961 +{
71962 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
71963 + return;
71964 +}
71965 +
71966 +void
71967 +gr_log_badprocpid(const char *entry)
71968 +{
71969 + gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
71970 + return;
71971 +}
71972 diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
71973 new file mode 100644
71974 index 0000000..2131422
71975 --- /dev/null
71976 +++ b/grsecurity/grsec_mount.c
71977 @@ -0,0 +1,62 @@
71978 +#include <linux/kernel.h>
71979 +#include <linux/sched.h>
71980 +#include <linux/mount.h>
71981 +#include <linux/grsecurity.h>
71982 +#include <linux/grinternal.h>
71983 +
71984 +void
71985 +gr_log_remount(const char *devname, const int retval)
71986 +{
71987 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
71988 + if (grsec_enable_mount && (retval >= 0))
71989 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
71990 +#endif
71991 + return;
71992 +}
71993 +
71994 +void
71995 +gr_log_unmount(const char *devname, const int retval)
71996 +{
71997 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
71998 + if (grsec_enable_mount && (retval >= 0))
71999 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
72000 +#endif
72001 + return;
72002 +}
72003 +
72004 +void
72005 +gr_log_mount(const char *from, const char *to, const int retval)
72006 +{
72007 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
72008 + if (grsec_enable_mount && (retval >= 0))
72009 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
72010 +#endif
72011 + return;
72012 +}
72013 +
72014 +int
72015 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
72016 +{
72017 +#ifdef CONFIG_GRKERNSEC_ROFS
72018 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
72019 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
72020 + return -EPERM;
72021 + } else
72022 + return 0;
72023 +#endif
72024 + return 0;
72025 +}
72026 +
72027 +int
72028 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
72029 +{
72030 +#ifdef CONFIG_GRKERNSEC_ROFS
72031 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
72032 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
72033 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
72034 + return -EPERM;
72035 + } else
72036 + return 0;
72037 +#endif
72038 + return 0;
72039 +}
72040 diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
72041 new file mode 100644
72042 index 0000000..6ee9d50
72043 --- /dev/null
72044 +++ b/grsecurity/grsec_pax.c
72045 @@ -0,0 +1,45 @@
72046 +#include <linux/kernel.h>
72047 +#include <linux/sched.h>
72048 +#include <linux/mm.h>
72049 +#include <linux/file.h>
72050 +#include <linux/grinternal.h>
72051 +#include <linux/grsecurity.h>
72052 +
72053 +void
72054 +gr_log_textrel(struct vm_area_struct * vma)
72055 +{
72056 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
72057 + if (grsec_enable_log_rwxmaps)
72058 + gr_log_textrel_ulong_ulong(GR_DONT_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
72059 +#endif
72060 + return;
72061 +}
72062 +
72063 +void gr_log_ptgnustack(struct file *file)
72064 +{
72065 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
72066 + if (grsec_enable_log_rwxmaps)
72067 + gr_log_rwxmap(GR_DONT_AUDIT, GR_PTGNUSTACK_MSG, file);
72068 +#endif
72069 + return;
72070 +}
72071 +
72072 +void
72073 +gr_log_rwxmmap(struct file *file)
72074 +{
72075 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
72076 + if (grsec_enable_log_rwxmaps)
72077 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
72078 +#endif
72079 + return;
72080 +}
72081 +
72082 +void
72083 +gr_log_rwxmprotect(struct vm_area_struct *vma)
72084 +{
72085 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
72086 + if (grsec_enable_log_rwxmaps)
72087 + gr_log_rwxmap_vma(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, vma);
72088 +#endif
72089 + return;
72090 +}
72091 diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
72092 new file mode 100644
72093 index 0000000..f7f29aa
72094 --- /dev/null
72095 +++ b/grsecurity/grsec_ptrace.c
72096 @@ -0,0 +1,30 @@
72097 +#include <linux/kernel.h>
72098 +#include <linux/sched.h>
72099 +#include <linux/grinternal.h>
72100 +#include <linux/security.h>
72101 +
72102 +void
72103 +gr_audit_ptrace(struct task_struct *task)
72104 +{
72105 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
72106 + if (grsec_enable_audit_ptrace)
72107 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
72108 +#endif
72109 + return;
72110 +}
72111 +
72112 +int
72113 +gr_ptrace_readexec(struct file *file, int unsafe_flags)
72114 +{
72115 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
72116 + const struct dentry *dentry = file->f_path.dentry;
72117 + const struct vfsmount *mnt = file->f_path.mnt;
72118 +
72119 + if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
72120 + (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
72121 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
72122 + return -EACCES;
72123 + }
72124 +#endif
72125 + return 0;
72126 +}
72127 diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
72128 new file mode 100644
72129 index 0000000..3860c7e
72130 --- /dev/null
72131 +++ b/grsecurity/grsec_sig.c
72132 @@ -0,0 +1,236 @@
72133 +#include <linux/kernel.h>
72134 +#include <linux/sched.h>
72135 +#include <linux/fs.h>
72136 +#include <linux/delay.h>
72137 +#include <linux/grsecurity.h>
72138 +#include <linux/grinternal.h>
72139 +#include <linux/hardirq.h>
72140 +
72141 +char *signames[] = {
72142 + [SIGSEGV] = "Segmentation fault",
72143 + [SIGILL] = "Illegal instruction",
72144 + [SIGABRT] = "Abort",
72145 + [SIGBUS] = "Invalid alignment/Bus error"
72146 +};
72147 +
72148 +void
72149 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
72150 +{
72151 +#ifdef CONFIG_GRKERNSEC_SIGNAL
72152 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
72153 + (sig == SIGABRT) || (sig == SIGBUS))) {
72154 + if (task_pid_nr(t) == task_pid_nr(current)) {
72155 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
72156 + } else {
72157 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
72158 + }
72159 + }
72160 +#endif
72161 + return;
72162 +}
72163 +
72164 +int
72165 +gr_handle_signal(const struct task_struct *p, const int sig)
72166 +{
72167 +#ifdef CONFIG_GRKERNSEC
72168 + /* ignore the 0 signal for protected task checks */
72169 + if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
72170 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
72171 + return -EPERM;
72172 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
72173 + return -EPERM;
72174 + }
72175 +#endif
72176 + return 0;
72177 +}
72178 +
72179 +#ifdef CONFIG_GRKERNSEC
72180 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
72181 +
72182 +int gr_fake_force_sig(int sig, struct task_struct *t)
72183 +{
72184 + unsigned long int flags;
72185 + int ret, blocked, ignored;
72186 + struct k_sigaction *action;
72187 +
72188 + spin_lock_irqsave(&t->sighand->siglock, flags);
72189 + action = &t->sighand->action[sig-1];
72190 + ignored = action->sa.sa_handler == SIG_IGN;
72191 + blocked = sigismember(&t->blocked, sig);
72192 + if (blocked || ignored) {
72193 + action->sa.sa_handler = SIG_DFL;
72194 + if (blocked) {
72195 + sigdelset(&t->blocked, sig);
72196 + recalc_sigpending_and_wake(t);
72197 + }
72198 + }
72199 + if (action->sa.sa_handler == SIG_DFL)
72200 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
72201 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
72202 +
72203 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
72204 +
72205 + return ret;
72206 +}
72207 +#endif
72208 +
72209 +#define GR_USER_BAN_TIME (15 * 60)
72210 +#define GR_DAEMON_BRUTE_TIME (30 * 60)
72211 +
72212 +void gr_handle_brute_attach(int dumpable)
72213 +{
72214 +#ifdef CONFIG_GRKERNSEC_BRUTE
72215 + struct task_struct *p = current;
72216 + kuid_t uid = GLOBAL_ROOT_UID;
72217 + int daemon = 0;
72218 +
72219 + if (!grsec_enable_brute)
72220 + return;
72221 +
72222 + rcu_read_lock();
72223 + read_lock(&tasklist_lock);
72224 + read_lock(&grsec_exec_file_lock);
72225 + if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
72226 + p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
72227 + p->real_parent->brute = 1;
72228 + daemon = 1;
72229 + } else {
72230 + const struct cred *cred = __task_cred(p), *cred2;
72231 + struct task_struct *tsk, *tsk2;
72232 +
72233 + if (dumpable != SUID_DUMP_USER && gr_is_global_nonroot(cred->uid)) {
72234 + struct user_struct *user;
72235 +
72236 + uid = cred->uid;
72237 +
72238 + /* this is put upon execution past expiration */
72239 + user = find_user(uid);
72240 + if (user == NULL)
72241 + goto unlock;
72242 + user->suid_banned = 1;
72243 + user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
72244 + if (user->suid_ban_expires == ~0UL)
72245 + user->suid_ban_expires--;
72246 +
72247 + /* only kill other threads of the same binary, from the same user */
72248 + do_each_thread(tsk2, tsk) {
72249 + cred2 = __task_cred(tsk);
72250 + if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
72251 + gr_fake_force_sig(SIGKILL, tsk);
72252 + } while_each_thread(tsk2, tsk);
72253 + }
72254 + }
72255 +unlock:
72256 + read_unlock(&grsec_exec_file_lock);
72257 + read_unlock(&tasklist_lock);
72258 + rcu_read_unlock();
72259 +
72260 + if (gr_is_global_nonroot(uid))
72261 + gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
72262 + else if (daemon)
72263 + gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
72264 +
72265 +#endif
72266 + return;
72267 +}
72268 +
72269 +void gr_handle_brute_check(void)
72270 +{
72271 +#ifdef CONFIG_GRKERNSEC_BRUTE
72272 + struct task_struct *p = current;
72273 +
72274 + if (unlikely(p->brute)) {
72275 + if (!grsec_enable_brute)
72276 + p->brute = 0;
72277 + else if (time_before(get_seconds(), p->brute_expires))
72278 + msleep(30 * 1000);
72279 + }
72280 +#endif
72281 + return;
72282 +}
72283 +
72284 +void gr_handle_kernel_exploit(void)
72285 +{
72286 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
72287 + const struct cred *cred;
72288 + struct task_struct *tsk, *tsk2;
72289 + struct user_struct *user;
72290 + kuid_t uid;
72291 +
72292 + if (in_irq() || in_serving_softirq() || in_nmi())
72293 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
72294 +
72295 + uid = current_uid();
72296 +
72297 + if (gr_is_global_root(uid))
72298 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
72299 + else {
72300 + /* kill all the processes of this user, hold a reference
72301 + to their creds struct, and prevent them from creating
72302 + another process until system reset
72303 + */
72304 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
72305 + GR_GLOBAL_UID(uid));
72306 + /* we intentionally leak this ref */
72307 + user = get_uid(current->cred->user);
72308 + if (user)
72309 + user->kernel_banned = 1;
72310 +
72311 + /* kill all processes of this user */
72312 + read_lock(&tasklist_lock);
72313 + do_each_thread(tsk2, tsk) {
72314 + cred = __task_cred(tsk);
72315 + if (uid_eq(cred->uid, uid))
72316 + gr_fake_force_sig(SIGKILL, tsk);
72317 + } while_each_thread(tsk2, tsk);
72318 + read_unlock(&tasklist_lock);
72319 + }
72320 +#endif
72321 +}
72322 +
72323 +#ifdef CONFIG_GRKERNSEC_BRUTE
72324 +static bool suid_ban_expired(struct user_struct *user)
72325 +{
72326 + if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
72327 + user->suid_banned = 0;
72328 + user->suid_ban_expires = 0;
72329 + free_uid(user);
72330 + return true;
72331 + }
72332 +
72333 + return false;
72334 +}
72335 +#endif
72336 +
72337 +int gr_process_kernel_exec_ban(void)
72338 +{
72339 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
72340 + if (unlikely(current->cred->user->kernel_banned))
72341 + return -EPERM;
72342 +#endif
72343 + return 0;
72344 +}
72345 +
72346 +int gr_process_kernel_setuid_ban(struct user_struct *user)
72347 +{
72348 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
72349 + if (unlikely(user->kernel_banned))
72350 + gr_fake_force_sig(SIGKILL, current);
72351 +#endif
72352 + return 0;
72353 +}
72354 +
72355 +int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
72356 +{
72357 +#ifdef CONFIG_GRKERNSEC_BRUTE
72358 + struct user_struct *user = current->cred->user;
72359 + if (unlikely(user->suid_banned)) {
72360 + if (suid_ban_expired(user))
72361 + return 0;
72362 + /* disallow execution of suid binaries only */
72363 + else if (!uid_eq(bprm->cred->euid, current->cred->uid))
72364 + return -EPERM;
72365 + }
72366 +#endif
72367 + return 0;
72368 +}
72369 diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
72370 new file mode 100644
72371 index 0000000..4030d57
72372 --- /dev/null
72373 +++ b/grsecurity/grsec_sock.c
72374 @@ -0,0 +1,244 @@
72375 +#include <linux/kernel.h>
72376 +#include <linux/module.h>
72377 +#include <linux/sched.h>
72378 +#include <linux/file.h>
72379 +#include <linux/net.h>
72380 +#include <linux/in.h>
72381 +#include <linux/ip.h>
72382 +#include <net/sock.h>
72383 +#include <net/inet_sock.h>
72384 +#include <linux/grsecurity.h>
72385 +#include <linux/grinternal.h>
72386 +#include <linux/gracl.h>
72387 +
72388 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
72389 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
72390 +
72391 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
72392 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
72393 +
72394 +#ifdef CONFIG_UNIX_MODULE
72395 +EXPORT_SYMBOL(gr_acl_handle_unix);
72396 +EXPORT_SYMBOL(gr_acl_handle_mknod);
72397 +EXPORT_SYMBOL(gr_handle_chroot_unix);
72398 +EXPORT_SYMBOL(gr_handle_create);
72399 +#endif
72400 +
72401 +#ifdef CONFIG_GRKERNSEC
72402 +#define gr_conn_table_size 32749
72403 +struct conn_table_entry {
72404 + struct conn_table_entry *next;
72405 + struct signal_struct *sig;
72406 +};
72407 +
72408 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
72409 +DEFINE_SPINLOCK(gr_conn_table_lock);
72410 +
72411 +extern const char * gr_socktype_to_name(unsigned char type);
72412 +extern const char * gr_proto_to_name(unsigned char proto);
72413 +extern const char * gr_sockfamily_to_name(unsigned char family);
72414 +
72415 +static __inline__ int
72416 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
72417 +{
72418 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
72419 +}
72420 +
72421 +static __inline__ int
72422 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
72423 + __u16 sport, __u16 dport)
72424 +{
72425 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
72426 + sig->gr_sport == sport && sig->gr_dport == dport))
72427 + return 1;
72428 + else
72429 + return 0;
72430 +}
72431 +
72432 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
72433 +{
72434 + struct conn_table_entry **match;
72435 + unsigned int index;
72436 +
72437 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
72438 + sig->gr_sport, sig->gr_dport,
72439 + gr_conn_table_size);
72440 +
72441 + newent->sig = sig;
72442 +
72443 + match = &gr_conn_table[index];
72444 + newent->next = *match;
72445 + *match = newent;
72446 +
72447 + return;
72448 +}
72449 +
72450 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
72451 +{
72452 + struct conn_table_entry *match, *last = NULL;
72453 + unsigned int index;
72454 +
72455 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
72456 + sig->gr_sport, sig->gr_dport,
72457 + gr_conn_table_size);
72458 +
72459 + match = gr_conn_table[index];
72460 + while (match && !conn_match(match->sig,
72461 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
72462 + sig->gr_dport)) {
72463 + last = match;
72464 + match = match->next;
72465 + }
72466 +
72467 + if (match) {
72468 + if (last)
72469 + last->next = match->next;
72470 + else
72471 + gr_conn_table[index] = NULL;
72472 + kfree(match);
72473 + }
72474 +
72475 + return;
72476 +}
72477 +
72478 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
72479 + __u16 sport, __u16 dport)
72480 +{
72481 + struct conn_table_entry *match;
72482 + unsigned int index;
72483 +
72484 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
72485 +
72486 + match = gr_conn_table[index];
72487 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
72488 + match = match->next;
72489 +
72490 + if (match)
72491 + return match->sig;
72492 + else
72493 + return NULL;
72494 +}
72495 +
72496 +#endif
72497 +
72498 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
72499 +{
72500 +#ifdef CONFIG_GRKERNSEC
72501 + struct signal_struct *sig = task->signal;
72502 + struct conn_table_entry *newent;
72503 +
72504 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
72505 + if (newent == NULL)
72506 + return;
72507 + /* no bh lock needed since we are called with bh disabled */
72508 + spin_lock(&gr_conn_table_lock);
72509 + gr_del_task_from_ip_table_nolock(sig);
72510 + sig->gr_saddr = inet->inet_rcv_saddr;
72511 + sig->gr_daddr = inet->inet_daddr;
72512 + sig->gr_sport = inet->inet_sport;
72513 + sig->gr_dport = inet->inet_dport;
72514 + gr_add_to_task_ip_table_nolock(sig, newent);
72515 + spin_unlock(&gr_conn_table_lock);
72516 +#endif
72517 + return;
72518 +}
72519 +
72520 +void gr_del_task_from_ip_table(struct task_struct *task)
72521 +{
72522 +#ifdef CONFIG_GRKERNSEC
72523 + spin_lock_bh(&gr_conn_table_lock);
72524 + gr_del_task_from_ip_table_nolock(task->signal);
72525 + spin_unlock_bh(&gr_conn_table_lock);
72526 +#endif
72527 + return;
72528 +}
72529 +
72530 +void
72531 +gr_attach_curr_ip(const struct sock *sk)
72532 +{
72533 +#ifdef CONFIG_GRKERNSEC
72534 + struct signal_struct *p, *set;
72535 + const struct inet_sock *inet = inet_sk(sk);
72536 +
72537 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
72538 + return;
72539 +
72540 + set = current->signal;
72541 +
72542 + spin_lock_bh(&gr_conn_table_lock);
72543 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
72544 + inet->inet_dport, inet->inet_sport);
72545 + if (unlikely(p != NULL)) {
72546 + set->curr_ip = p->curr_ip;
72547 + set->used_accept = 1;
72548 + gr_del_task_from_ip_table_nolock(p);
72549 + spin_unlock_bh(&gr_conn_table_lock);
72550 + return;
72551 + }
72552 + spin_unlock_bh(&gr_conn_table_lock);
72553 +
72554 + set->curr_ip = inet->inet_daddr;
72555 + set->used_accept = 1;
72556 +#endif
72557 + return;
72558 +}
72559 +
72560 +int
72561 +gr_handle_sock_all(const int family, const int type, const int protocol)
72562 +{
72563 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
72564 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
72565 + (family != AF_UNIX)) {
72566 + if (family == AF_INET)
72567 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
72568 + else
72569 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
72570 + return -EACCES;
72571 + }
72572 +#endif
72573 + return 0;
72574 +}
72575 +
72576 +int
72577 +gr_handle_sock_server(const struct sockaddr *sck)
72578 +{
72579 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
72580 + if (grsec_enable_socket_server &&
72581 + in_group_p(grsec_socket_server_gid) &&
72582 + sck && (sck->sa_family != AF_UNIX) &&
72583 + (sck->sa_family != AF_LOCAL)) {
72584 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
72585 + return -EACCES;
72586 + }
72587 +#endif
72588 + return 0;
72589 +}
72590 +
72591 +int
72592 +gr_handle_sock_server_other(const struct sock *sck)
72593 +{
72594 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
72595 + if (grsec_enable_socket_server &&
72596 + in_group_p(grsec_socket_server_gid) &&
72597 + sck && (sck->sk_family != AF_UNIX) &&
72598 + (sck->sk_family != AF_LOCAL)) {
72599 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
72600 + return -EACCES;
72601 + }
72602 +#endif
72603 + return 0;
72604 +}
72605 +
72606 +int
72607 +gr_handle_sock_client(const struct sockaddr *sck)
72608 +{
72609 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
72610 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
72611 + sck && (sck->sa_family != AF_UNIX) &&
72612 + (sck->sa_family != AF_LOCAL)) {
72613 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
72614 + return -EACCES;
72615 + }
72616 +#endif
72617 + return 0;
72618 +}
72619 diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
72620 new file mode 100644
72621 index 0000000..8159888
72622 --- /dev/null
72623 +++ b/grsecurity/grsec_sysctl.c
72624 @@ -0,0 +1,479 @@
72625 +#include <linux/kernel.h>
72626 +#include <linux/sched.h>
72627 +#include <linux/sysctl.h>
72628 +#include <linux/grsecurity.h>
72629 +#include <linux/grinternal.h>
72630 +
72631 +int
72632 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
72633 +{
72634 +#ifdef CONFIG_GRKERNSEC_SYSCTL
72635 + if (dirname == NULL || name == NULL)
72636 + return 0;
72637 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
72638 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
72639 + return -EACCES;
72640 + }
72641 +#endif
72642 + return 0;
72643 +}
72644 +
72645 +#if defined(CONFIG_GRKERNSEC_ROFS) || defined(CONFIG_GRKERNSEC_DENYUSB)
72646 +static int __maybe_unused __read_only one = 1;
72647 +#endif
72648 +
72649 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) || \
72650 + defined(CONFIG_GRKERNSEC_DENYUSB)
72651 +struct ctl_table grsecurity_table[] = {
72652 +#ifdef CONFIG_GRKERNSEC_SYSCTL
72653 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
72654 +#ifdef CONFIG_GRKERNSEC_IO
72655 + {
72656 + .procname = "disable_priv_io",
72657 + .data = &grsec_disable_privio,
72658 + .maxlen = sizeof(int),
72659 + .mode = 0600,
72660 + .proc_handler = &proc_dointvec,
72661 + },
72662 +#endif
72663 +#endif
72664 +#ifdef CONFIG_GRKERNSEC_LINK
72665 + {
72666 + .procname = "linking_restrictions",
72667 + .data = &grsec_enable_link,
72668 + .maxlen = sizeof(int),
72669 + .mode = 0600,
72670 + .proc_handler = &proc_dointvec,
72671 + },
72672 +#endif
72673 +#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
72674 + {
72675 + .procname = "enforce_symlinksifowner",
72676 + .data = &grsec_enable_symlinkown,
72677 + .maxlen = sizeof(int),
72678 + .mode = 0600,
72679 + .proc_handler = &proc_dointvec,
72680 + },
72681 + {
72682 + .procname = "symlinkown_gid",
72683 + .data = &grsec_symlinkown_gid,
72684 + .maxlen = sizeof(int),
72685 + .mode = 0600,
72686 + .proc_handler = &proc_dointvec,
72687 + },
72688 +#endif
72689 +#ifdef CONFIG_GRKERNSEC_BRUTE
72690 + {
72691 + .procname = "deter_bruteforce",
72692 + .data = &grsec_enable_brute,
72693 + .maxlen = sizeof(int),
72694 + .mode = 0600,
72695 + .proc_handler = &proc_dointvec,
72696 + },
72697 +#endif
72698 +#ifdef CONFIG_GRKERNSEC_FIFO
72699 + {
72700 + .procname = "fifo_restrictions",
72701 + .data = &grsec_enable_fifo,
72702 + .maxlen = sizeof(int),
72703 + .mode = 0600,
72704 + .proc_handler = &proc_dointvec,
72705 + },
72706 +#endif
72707 +#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
72708 + {
72709 + .procname = "ptrace_readexec",
72710 + .data = &grsec_enable_ptrace_readexec,
72711 + .maxlen = sizeof(int),
72712 + .mode = 0600,
72713 + .proc_handler = &proc_dointvec,
72714 + },
72715 +#endif
72716 +#ifdef CONFIG_GRKERNSEC_SETXID
72717 + {
72718 + .procname = "consistent_setxid",
72719 + .data = &grsec_enable_setxid,
72720 + .maxlen = sizeof(int),
72721 + .mode = 0600,
72722 + .proc_handler = &proc_dointvec,
72723 + },
72724 +#endif
72725 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72726 + {
72727 + .procname = "ip_blackhole",
72728 + .data = &grsec_enable_blackhole,
72729 + .maxlen = sizeof(int),
72730 + .mode = 0600,
72731 + .proc_handler = &proc_dointvec,
72732 + },
72733 + {
72734 + .procname = "lastack_retries",
72735 + .data = &grsec_lastack_retries,
72736 + .maxlen = sizeof(int),
72737 + .mode = 0600,
72738 + .proc_handler = &proc_dointvec,
72739 + },
72740 +#endif
72741 +#ifdef CONFIG_GRKERNSEC_EXECLOG
72742 + {
72743 + .procname = "exec_logging",
72744 + .data = &grsec_enable_execlog,
72745 + .maxlen = sizeof(int),
72746 + .mode = 0600,
72747 + .proc_handler = &proc_dointvec,
72748 + },
72749 +#endif
72750 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
72751 + {
72752 + .procname = "rwxmap_logging",
72753 + .data = &grsec_enable_log_rwxmaps,
72754 + .maxlen = sizeof(int),
72755 + .mode = 0600,
72756 + .proc_handler = &proc_dointvec,
72757 + },
72758 +#endif
72759 +#ifdef CONFIG_GRKERNSEC_SIGNAL
72760 + {
72761 + .procname = "signal_logging",
72762 + .data = &grsec_enable_signal,
72763 + .maxlen = sizeof(int),
72764 + .mode = 0600,
72765 + .proc_handler = &proc_dointvec,
72766 + },
72767 +#endif
72768 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
72769 + {
72770 + .procname = "forkfail_logging",
72771 + .data = &grsec_enable_forkfail,
72772 + .maxlen = sizeof(int),
72773 + .mode = 0600,
72774 + .proc_handler = &proc_dointvec,
72775 + },
72776 +#endif
72777 +#ifdef CONFIG_GRKERNSEC_TIME
72778 + {
72779 + .procname = "timechange_logging",
72780 + .data = &grsec_enable_time,
72781 + .maxlen = sizeof(int),
72782 + .mode = 0600,
72783 + .proc_handler = &proc_dointvec,
72784 + },
72785 +#endif
72786 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
72787 + {
72788 + .procname = "chroot_deny_shmat",
72789 + .data = &grsec_enable_chroot_shmat,
72790 + .maxlen = sizeof(int),
72791 + .mode = 0600,
72792 + .proc_handler = &proc_dointvec,
72793 + },
72794 +#endif
72795 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
72796 + {
72797 + .procname = "chroot_deny_unix",
72798 + .data = &grsec_enable_chroot_unix,
72799 + .maxlen = sizeof(int),
72800 + .mode = 0600,
72801 + .proc_handler = &proc_dointvec,
72802 + },
72803 +#endif
72804 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
72805 + {
72806 + .procname = "chroot_deny_mount",
72807 + .data = &grsec_enable_chroot_mount,
72808 + .maxlen = sizeof(int),
72809 + .mode = 0600,
72810 + .proc_handler = &proc_dointvec,
72811 + },
72812 +#endif
72813 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
72814 + {
72815 + .procname = "chroot_deny_fchdir",
72816 + .data = &grsec_enable_chroot_fchdir,
72817 + .maxlen = sizeof(int),
72818 + .mode = 0600,
72819 + .proc_handler = &proc_dointvec,
72820 + },
72821 +#endif
72822 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
72823 + {
72824 + .procname = "chroot_deny_chroot",
72825 + .data = &grsec_enable_chroot_double,
72826 + .maxlen = sizeof(int),
72827 + .mode = 0600,
72828 + .proc_handler = &proc_dointvec,
72829 + },
72830 +#endif
72831 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
72832 + {
72833 + .procname = "chroot_deny_pivot",
72834 + .data = &grsec_enable_chroot_pivot,
72835 + .maxlen = sizeof(int),
72836 + .mode = 0600,
72837 + .proc_handler = &proc_dointvec,
72838 + },
72839 +#endif
72840 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
72841 + {
72842 + .procname = "chroot_enforce_chdir",
72843 + .data = &grsec_enable_chroot_chdir,
72844 + .maxlen = sizeof(int),
72845 + .mode = 0600,
72846 + .proc_handler = &proc_dointvec,
72847 + },
72848 +#endif
72849 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
72850 + {
72851 + .procname = "chroot_deny_chmod",
72852 + .data = &grsec_enable_chroot_chmod,
72853 + .maxlen = sizeof(int),
72854 + .mode = 0600,
72855 + .proc_handler = &proc_dointvec,
72856 + },
72857 +#endif
72858 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
72859 + {
72860 + .procname = "chroot_deny_mknod",
72861 + .data = &grsec_enable_chroot_mknod,
72862 + .maxlen = sizeof(int),
72863 + .mode = 0600,
72864 + .proc_handler = &proc_dointvec,
72865 + },
72866 +#endif
72867 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
72868 + {
72869 + .procname = "chroot_restrict_nice",
72870 + .data = &grsec_enable_chroot_nice,
72871 + .maxlen = sizeof(int),
72872 + .mode = 0600,
72873 + .proc_handler = &proc_dointvec,
72874 + },
72875 +#endif
72876 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
72877 + {
72878 + .procname = "chroot_execlog",
72879 + .data = &grsec_enable_chroot_execlog,
72880 + .maxlen = sizeof(int),
72881 + .mode = 0600,
72882 + .proc_handler = &proc_dointvec,
72883 + },
72884 +#endif
72885 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
72886 + {
72887 + .procname = "chroot_caps",
72888 + .data = &grsec_enable_chroot_caps,
72889 + .maxlen = sizeof(int),
72890 + .mode = 0600,
72891 + .proc_handler = &proc_dointvec,
72892 + },
72893 +#endif
72894 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
72895 + {
72896 + .procname = "chroot_deny_sysctl",
72897 + .data = &grsec_enable_chroot_sysctl,
72898 + .maxlen = sizeof(int),
72899 + .mode = 0600,
72900 + .proc_handler = &proc_dointvec,
72901 + },
72902 +#endif
72903 +#ifdef CONFIG_GRKERNSEC_TPE
72904 + {
72905 + .procname = "tpe",
72906 + .data = &grsec_enable_tpe,
72907 + .maxlen = sizeof(int),
72908 + .mode = 0600,
72909 + .proc_handler = &proc_dointvec,
72910 + },
72911 + {
72912 + .procname = "tpe_gid",
72913 + .data = &grsec_tpe_gid,
72914 + .maxlen = sizeof(int),
72915 + .mode = 0600,
72916 + .proc_handler = &proc_dointvec,
72917 + },
72918 +#endif
72919 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
72920 + {
72921 + .procname = "tpe_invert",
72922 + .data = &grsec_enable_tpe_invert,
72923 + .maxlen = sizeof(int),
72924 + .mode = 0600,
72925 + .proc_handler = &proc_dointvec,
72926 + },
72927 +#endif
72928 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
72929 + {
72930 + .procname = "tpe_restrict_all",
72931 + .data = &grsec_enable_tpe_all,
72932 + .maxlen = sizeof(int),
72933 + .mode = 0600,
72934 + .proc_handler = &proc_dointvec,
72935 + },
72936 +#endif
72937 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
72938 + {
72939 + .procname = "socket_all",
72940 + .data = &grsec_enable_socket_all,
72941 + .maxlen = sizeof(int),
72942 + .mode = 0600,
72943 + .proc_handler = &proc_dointvec,
72944 + },
72945 + {
72946 + .procname = "socket_all_gid",
72947 + .data = &grsec_socket_all_gid,
72948 + .maxlen = sizeof(int),
72949 + .mode = 0600,
72950 + .proc_handler = &proc_dointvec,
72951 + },
72952 +#endif
72953 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
72954 + {
72955 + .procname = "socket_client",
72956 + .data = &grsec_enable_socket_client,
72957 + .maxlen = sizeof(int),
72958 + .mode = 0600,
72959 + .proc_handler = &proc_dointvec,
72960 + },
72961 + {
72962 + .procname = "socket_client_gid",
72963 + .data = &grsec_socket_client_gid,
72964 + .maxlen = sizeof(int),
72965 + .mode = 0600,
72966 + .proc_handler = &proc_dointvec,
72967 + },
72968 +#endif
72969 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
72970 + {
72971 + .procname = "socket_server",
72972 + .data = &grsec_enable_socket_server,
72973 + .maxlen = sizeof(int),
72974 + .mode = 0600,
72975 + .proc_handler = &proc_dointvec,
72976 + },
72977 + {
72978 + .procname = "socket_server_gid",
72979 + .data = &grsec_socket_server_gid,
72980 + .maxlen = sizeof(int),
72981 + .mode = 0600,
72982 + .proc_handler = &proc_dointvec,
72983 + },
72984 +#endif
72985 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
72986 + {
72987 + .procname = "audit_group",
72988 + .data = &grsec_enable_group,
72989 + .maxlen = sizeof(int),
72990 + .mode = 0600,
72991 + .proc_handler = &proc_dointvec,
72992 + },
72993 + {
72994 + .procname = "audit_gid",
72995 + .data = &grsec_audit_gid,
72996 + .maxlen = sizeof(int),
72997 + .mode = 0600,
72998 + .proc_handler = &proc_dointvec,
72999 + },
73000 +#endif
73001 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
73002 + {
73003 + .procname = "audit_chdir",
73004 + .data = &grsec_enable_chdir,
73005 + .maxlen = sizeof(int),
73006 + .mode = 0600,
73007 + .proc_handler = &proc_dointvec,
73008 + },
73009 +#endif
73010 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
73011 + {
73012 + .procname = "audit_mount",
73013 + .data = &grsec_enable_mount,
73014 + .maxlen = sizeof(int),
73015 + .mode = 0600,
73016 + .proc_handler = &proc_dointvec,
73017 + },
73018 +#endif
73019 +#ifdef CONFIG_GRKERNSEC_DMESG
73020 + {
73021 + .procname = "dmesg",
73022 + .data = &grsec_enable_dmesg,
73023 + .maxlen = sizeof(int),
73024 + .mode = 0600,
73025 + .proc_handler = &proc_dointvec,
73026 + },
73027 +#endif
73028 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
73029 + {
73030 + .procname = "chroot_findtask",
73031 + .data = &grsec_enable_chroot_findtask,
73032 + .maxlen = sizeof(int),
73033 + .mode = 0600,
73034 + .proc_handler = &proc_dointvec,
73035 + },
73036 +#endif
73037 +#ifdef CONFIG_GRKERNSEC_RESLOG
73038 + {
73039 + .procname = "resource_logging",
73040 + .data = &grsec_resource_logging,
73041 + .maxlen = sizeof(int),
73042 + .mode = 0600,
73043 + .proc_handler = &proc_dointvec,
73044 + },
73045 +#endif
73046 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
73047 + {
73048 + .procname = "audit_ptrace",
73049 + .data = &grsec_enable_audit_ptrace,
73050 + .maxlen = sizeof(int),
73051 + .mode = 0600,
73052 + .proc_handler = &proc_dointvec,
73053 + },
73054 +#endif
73055 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
73056 + {
73057 + .procname = "harden_ptrace",
73058 + .data = &grsec_enable_harden_ptrace,
73059 + .maxlen = sizeof(int),
73060 + .mode = 0600,
73061 + .proc_handler = &proc_dointvec,
73062 + },
73063 +#endif
73064 +#ifdef CONFIG_GRKERNSEC_HARDEN_IPC
73065 + {
73066 + .procname = "harden_ipc",
73067 + .data = &grsec_enable_harden_ipc,
73068 + .maxlen = sizeof(int),
73069 + .mode = 0600,
73070 + .proc_handler = &proc_dointvec,
73071 + },
73072 +#endif
73073 + {
73074 + .procname = "grsec_lock",
73075 + .data = &grsec_lock,
73076 + .maxlen = sizeof(int),
73077 + .mode = 0600,
73078 + .proc_handler = &proc_dointvec,
73079 + },
73080 +#endif
73081 +#ifdef CONFIG_GRKERNSEC_ROFS
73082 + {
73083 + .procname = "romount_protect",
73084 + .data = &grsec_enable_rofs,
73085 + .maxlen = sizeof(int),
73086 + .mode = 0600,
73087 + .proc_handler = &proc_dointvec_minmax,
73088 + .extra1 = &one,
73089 + .extra2 = &one,
73090 + },
73091 +#endif
73092 +#if defined(CONFIG_GRKERNSEC_DENYUSB) && !defined(CONFIG_GRKERNSEC_DENYUSB_FORCE)
73093 + {
73094 + .procname = "deny_new_usb",
73095 + .data = &grsec_deny_new_usb,
73096 + .maxlen = sizeof(int),
73097 + .mode = 0600,
73098 + .proc_handler = &proc_dointvec,
73099 + },
73100 +#endif
73101 + { }
73102 +};
73103 +#endif
73104 diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
73105 new file mode 100644
73106 index 0000000..0dc13c3
73107 --- /dev/null
73108 +++ b/grsecurity/grsec_time.c
73109 @@ -0,0 +1,16 @@
73110 +#include <linux/kernel.h>
73111 +#include <linux/sched.h>
73112 +#include <linux/grinternal.h>
73113 +#include <linux/module.h>
73114 +
73115 +void
73116 +gr_log_timechange(void)
73117 +{
73118 +#ifdef CONFIG_GRKERNSEC_TIME
73119 + if (grsec_enable_time)
73120 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
73121 +#endif
73122 + return;
73123 +}
73124 +
73125 +EXPORT_SYMBOL(gr_log_timechange);
73126 diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
73127 new file mode 100644
73128 index 0000000..ee57dcf
73129 --- /dev/null
73130 +++ b/grsecurity/grsec_tpe.c
73131 @@ -0,0 +1,73 @@
73132 +#include <linux/kernel.h>
73133 +#include <linux/sched.h>
73134 +#include <linux/file.h>
73135 +#include <linux/fs.h>
73136 +#include <linux/grinternal.h>
73137 +
73138 +extern int gr_acl_tpe_check(void);
73139 +
73140 +int
73141 +gr_tpe_allow(const struct file *file)
73142 +{
73143 +#ifdef CONFIG_GRKERNSEC
73144 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
73145 + const struct cred *cred = current_cred();
73146 + char *msg = NULL;
73147 + char *msg2 = NULL;
73148 +
73149 + // never restrict root
73150 + if (gr_is_global_root(cred->uid))
73151 + return 1;
73152 +
73153 + if (grsec_enable_tpe) {
73154 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
73155 + if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
73156 + msg = "not being in trusted group";
73157 + else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
73158 + msg = "being in untrusted group";
73159 +#else
73160 + if (in_group_p(grsec_tpe_gid))
73161 + msg = "being in untrusted group";
73162 +#endif
73163 + }
73164 + if (!msg && gr_acl_tpe_check())
73165 + msg = "being in untrusted role";
73166 +
73167 + // not in any affected group/role
73168 + if (!msg)
73169 + goto next_check;
73170 +
73171 + if (gr_is_global_nonroot(inode->i_uid))
73172 + msg2 = "file in non-root-owned directory";
73173 + else if (inode->i_mode & S_IWOTH)
73174 + msg2 = "file in world-writable directory";
73175 + else if (inode->i_mode & S_IWGRP)
73176 + msg2 = "file in group-writable directory";
73177 +
73178 + if (msg && msg2) {
73179 + char fullmsg[70] = {0};
73180 + snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
73181 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
73182 + return 0;
73183 + }
73184 + msg = NULL;
73185 +next_check:
73186 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
73187 + if (!grsec_enable_tpe || !grsec_enable_tpe_all)
73188 + return 1;
73189 +
73190 + if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
73191 + msg = "directory not owned by user";
73192 + else if (inode->i_mode & S_IWOTH)
73193 + msg = "file in world-writable directory";
73194 + else if (inode->i_mode & S_IWGRP)
73195 + msg = "file in group-writable directory";
73196 +
73197 + if (msg) {
73198 + gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
73199 + return 0;
73200 + }
73201 +#endif
73202 +#endif
73203 + return 1;
73204 +}
73205 diff --git a/grsecurity/grsec_usb.c b/grsecurity/grsec_usb.c
73206 new file mode 100644
73207 index 0000000..ae02d8e
73208 --- /dev/null
73209 +++ b/grsecurity/grsec_usb.c
73210 @@ -0,0 +1,15 @@
73211 +#include <linux/kernel.h>
73212 +#include <linux/grinternal.h>
73213 +#include <linux/module.h>
73214 +
73215 +int gr_handle_new_usb(void)
73216 +{
73217 +#ifdef CONFIG_GRKERNSEC_DENYUSB
73218 + if (grsec_deny_new_usb) {
73219 + printk(KERN_ALERT "grsec: denied insert of new USB device\n");
73220 + return 1;
73221 + }
73222 +#endif
73223 + return 0;
73224 +}
73225 +EXPORT_SYMBOL_GPL(gr_handle_new_usb);
73226 diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
73227 new file mode 100644
73228 index 0000000..9f7b1ac
73229 --- /dev/null
73230 +++ b/grsecurity/grsum.c
73231 @@ -0,0 +1,61 @@
73232 +#include <linux/err.h>
73233 +#include <linux/kernel.h>
73234 +#include <linux/sched.h>
73235 +#include <linux/mm.h>
73236 +#include <linux/scatterlist.h>
73237 +#include <linux/crypto.h>
73238 +#include <linux/gracl.h>
73239 +
73240 +
73241 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
73242 +#error "crypto and sha256 must be built into the kernel"
73243 +#endif
73244 +
73245 +int
73246 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
73247 +{
73248 + char *p;
73249 + struct crypto_hash *tfm;
73250 + struct hash_desc desc;
73251 + struct scatterlist sg;
73252 + unsigned char temp_sum[GR_SHA_LEN];
73253 + volatile int retval = 0;
73254 + volatile int dummy = 0;
73255 + unsigned int i;
73256 +
73257 + sg_init_table(&sg, 1);
73258 +
73259 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
73260 + if (IS_ERR(tfm)) {
73261 + /* should never happen, since sha256 should be built in */
73262 + return 1;
73263 + }
73264 +
73265 + desc.tfm = tfm;
73266 + desc.flags = 0;
73267 +
73268 + crypto_hash_init(&desc);
73269 +
73270 + p = salt;
73271 + sg_set_buf(&sg, p, GR_SALT_LEN);
73272 + crypto_hash_update(&desc, &sg, sg.length);
73273 +
73274 + p = entry->pw;
73275 + sg_set_buf(&sg, p, strlen(p));
73276 +
73277 + crypto_hash_update(&desc, &sg, sg.length);
73278 +
73279 + crypto_hash_final(&desc, temp_sum);
73280 +
73281 + memset(entry->pw, 0, GR_PW_LEN);
73282 +
73283 + for (i = 0; i < GR_SHA_LEN; i++)
73284 + if (sum[i] != temp_sum[i])
73285 + retval = 1;
73286 + else
73287 + dummy = 1; // waste a cycle
73288 +
73289 + crypto_free_hash(tfm);
73290 +
73291 + return retval;
73292 +}
73293 diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
73294 index 77ff547..181834f 100644
73295 --- a/include/asm-generic/4level-fixup.h
73296 +++ b/include/asm-generic/4level-fixup.h
73297 @@ -13,8 +13,10 @@
73298 #define pmd_alloc(mm, pud, address) \
73299 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
73300 NULL: pmd_offset(pud, address))
73301 +#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
73302
73303 #define pud_alloc(mm, pgd, address) (pgd)
73304 +#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
73305 #define pud_offset(pgd, start) (pgd)
73306 #define pud_none(pud) 0
73307 #define pud_bad(pud) 0
73308 diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
73309 index b7babf0..97f4c4f 100644
73310 --- a/include/asm-generic/atomic-long.h
73311 +++ b/include/asm-generic/atomic-long.h
73312 @@ -22,6 +22,12 @@
73313
73314 typedef atomic64_t atomic_long_t;
73315
73316 +#ifdef CONFIG_PAX_REFCOUNT
73317 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
73318 +#else
73319 +typedef atomic64_t atomic_long_unchecked_t;
73320 +#endif
73321 +
73322 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
73323
73324 static inline long atomic_long_read(atomic_long_t *l)
73325 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
73326 return (long)atomic64_read(v);
73327 }
73328
73329 +#ifdef CONFIG_PAX_REFCOUNT
73330 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
73331 +{
73332 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
73333 +
73334 + return (long)atomic64_read_unchecked(v);
73335 +}
73336 +#endif
73337 +
73338 static inline void atomic_long_set(atomic_long_t *l, long i)
73339 {
73340 atomic64_t *v = (atomic64_t *)l;
73341 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
73342 atomic64_set(v, i);
73343 }
73344
73345 +#ifdef CONFIG_PAX_REFCOUNT
73346 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
73347 +{
73348 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
73349 +
73350 + atomic64_set_unchecked(v, i);
73351 +}
73352 +#endif
73353 +
73354 static inline void atomic_long_inc(atomic_long_t *l)
73355 {
73356 atomic64_t *v = (atomic64_t *)l;
73357 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
73358 atomic64_inc(v);
73359 }
73360
73361 +#ifdef CONFIG_PAX_REFCOUNT
73362 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
73363 +{
73364 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
73365 +
73366 + atomic64_inc_unchecked(v);
73367 +}
73368 +#endif
73369 +
73370 static inline void atomic_long_dec(atomic_long_t *l)
73371 {
73372 atomic64_t *v = (atomic64_t *)l;
73373 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
73374 atomic64_dec(v);
73375 }
73376
73377 +#ifdef CONFIG_PAX_REFCOUNT
73378 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
73379 +{
73380 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
73381 +
73382 + atomic64_dec_unchecked(v);
73383 +}
73384 +#endif
73385 +
73386 static inline void atomic_long_add(long i, atomic_long_t *l)
73387 {
73388 atomic64_t *v = (atomic64_t *)l;
73389 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
73390 atomic64_add(i, v);
73391 }
73392
73393 +#ifdef CONFIG_PAX_REFCOUNT
73394 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
73395 +{
73396 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
73397 +
73398 + atomic64_add_unchecked(i, v);
73399 +}
73400 +#endif
73401 +
73402 static inline void atomic_long_sub(long i, atomic_long_t *l)
73403 {
73404 atomic64_t *v = (atomic64_t *)l;
73405 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
73406 atomic64_sub(i, v);
73407 }
73408
73409 +#ifdef CONFIG_PAX_REFCOUNT
73410 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
73411 +{
73412 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
73413 +
73414 + atomic64_sub_unchecked(i, v);
73415 +}
73416 +#endif
73417 +
73418 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
73419 {
73420 atomic64_t *v = (atomic64_t *)l;
73421 @@ -94,13 +154,22 @@ static inline int atomic_long_add_negative(long i, atomic_long_t *l)
73422 return atomic64_add_negative(i, v);
73423 }
73424
73425 -static inline long atomic_long_add_return(long i, atomic_long_t *l)
73426 +static inline long __intentional_overflow(-1) atomic_long_add_return(long i, atomic_long_t *l)
73427 {
73428 atomic64_t *v = (atomic64_t *)l;
73429
73430 return (long)atomic64_add_return(i, v);
73431 }
73432
73433 +#ifdef CONFIG_PAX_REFCOUNT
73434 +static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
73435 +{
73436 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
73437 +
73438 + return (long)atomic64_add_return_unchecked(i, v);
73439 +}
73440 +#endif
73441 +
73442 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
73443 {
73444 atomic64_t *v = (atomic64_t *)l;
73445 @@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
73446 return (long)atomic64_inc_return(v);
73447 }
73448
73449 +#ifdef CONFIG_PAX_REFCOUNT
73450 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
73451 +{
73452 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
73453 +
73454 + return (long)atomic64_inc_return_unchecked(v);
73455 +}
73456 +#endif
73457 +
73458 static inline long atomic_long_dec_return(atomic_long_t *l)
73459 {
73460 atomic64_t *v = (atomic64_t *)l;
73461 @@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
73462
73463 typedef atomic_t atomic_long_t;
73464
73465 +#ifdef CONFIG_PAX_REFCOUNT
73466 +typedef atomic_unchecked_t atomic_long_unchecked_t;
73467 +#else
73468 +typedef atomic_t atomic_long_unchecked_t;
73469 +#endif
73470 +
73471 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
73472 static inline long atomic_long_read(atomic_long_t *l)
73473 {
73474 @@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
73475 return (long)atomic_read(v);
73476 }
73477
73478 +#ifdef CONFIG_PAX_REFCOUNT
73479 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
73480 +{
73481 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
73482 +
73483 + return (long)atomic_read_unchecked(v);
73484 +}
73485 +#endif
73486 +
73487 static inline void atomic_long_set(atomic_long_t *l, long i)
73488 {
73489 atomic_t *v = (atomic_t *)l;
73490 @@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
73491 atomic_set(v, i);
73492 }
73493
73494 +#ifdef CONFIG_PAX_REFCOUNT
73495 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
73496 +{
73497 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
73498 +
73499 + atomic_set_unchecked(v, i);
73500 +}
73501 +#endif
73502 +
73503 static inline void atomic_long_inc(atomic_long_t *l)
73504 {
73505 atomic_t *v = (atomic_t *)l;
73506 @@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
73507 atomic_inc(v);
73508 }
73509
73510 +#ifdef CONFIG_PAX_REFCOUNT
73511 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
73512 +{
73513 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
73514 +
73515 + atomic_inc_unchecked(v);
73516 +}
73517 +#endif
73518 +
73519 static inline void atomic_long_dec(atomic_long_t *l)
73520 {
73521 atomic_t *v = (atomic_t *)l;
73522 @@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
73523 atomic_dec(v);
73524 }
73525
73526 +#ifdef CONFIG_PAX_REFCOUNT
73527 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
73528 +{
73529 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
73530 +
73531 + atomic_dec_unchecked(v);
73532 +}
73533 +#endif
73534 +
73535 static inline void atomic_long_add(long i, atomic_long_t *l)
73536 {
73537 atomic_t *v = (atomic_t *)l;
73538 @@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
73539 atomic_add(i, v);
73540 }
73541
73542 +#ifdef CONFIG_PAX_REFCOUNT
73543 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
73544 +{
73545 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
73546 +
73547 + atomic_add_unchecked(i, v);
73548 +}
73549 +#endif
73550 +
73551 static inline void atomic_long_sub(long i, atomic_long_t *l)
73552 {
73553 atomic_t *v = (atomic_t *)l;
73554 @@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
73555 atomic_sub(i, v);
73556 }
73557
73558 +#ifdef CONFIG_PAX_REFCOUNT
73559 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
73560 +{
73561 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
73562 +
73563 + atomic_sub_unchecked(i, v);
73564 +}
73565 +#endif
73566 +
73567 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
73568 {
73569 atomic_t *v = (atomic_t *)l;
73570 @@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
73571 return (long)atomic_add_return(i, v);
73572 }
73573
73574 +#ifdef CONFIG_PAX_REFCOUNT
73575 +static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
73576 +{
73577 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
73578 +
73579 + return (long)atomic_add_return_unchecked(i, v);
73580 +}
73581 +
73582 +#endif
73583 +
73584 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
73585 {
73586 atomic_t *v = (atomic_t *)l;
73587 @@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
73588 return (long)atomic_inc_return(v);
73589 }
73590
73591 +#ifdef CONFIG_PAX_REFCOUNT
73592 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
73593 +{
73594 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
73595 +
73596 + return (long)atomic_inc_return_unchecked(v);
73597 +}
73598 +#endif
73599 +
73600 static inline long atomic_long_dec_return(atomic_long_t *l)
73601 {
73602 atomic_t *v = (atomic_t *)l;
73603 @@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
73604
73605 #endif /* BITS_PER_LONG == 64 */
73606
73607 +#ifdef CONFIG_PAX_REFCOUNT
73608 +static inline void pax_refcount_needs_these_functions(void)
73609 +{
73610 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
73611 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
73612 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
73613 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
73614 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
73615 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
73616 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
73617 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
73618 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
73619 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
73620 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
73621 +#ifdef CONFIG_X86
73622 + atomic_clear_mask_unchecked(0, NULL);
73623 + atomic_set_mask_unchecked(0, NULL);
73624 +#endif
73625 +
73626 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
73627 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
73628 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
73629 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
73630 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
73631 + atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
73632 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
73633 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
73634 +}
73635 +#else
73636 +#define atomic_read_unchecked(v) atomic_read(v)
73637 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
73638 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
73639 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
73640 +#define atomic_inc_unchecked(v) atomic_inc(v)
73641 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
73642 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
73643 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
73644 +#define atomic_dec_unchecked(v) atomic_dec(v)
73645 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
73646 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
73647 +#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
73648 +#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
73649 +
73650 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
73651 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
73652 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
73653 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
73654 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
73655 +#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
73656 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
73657 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
73658 +#endif
73659 +
73660 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
73661 diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
73662 index 33bd2de..f31bff97 100644
73663 --- a/include/asm-generic/atomic.h
73664 +++ b/include/asm-generic/atomic.h
73665 @@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
73666 * Atomically clears the bits set in @mask from @v
73667 */
73668 #ifndef atomic_clear_mask
73669 -static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
73670 +static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
73671 {
73672 unsigned long flags;
73673
73674 diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
73675 index b18ce4f..2ee2843 100644
73676 --- a/include/asm-generic/atomic64.h
73677 +++ b/include/asm-generic/atomic64.h
73678 @@ -16,6 +16,8 @@ typedef struct {
73679 long long counter;
73680 } atomic64_t;
73681
73682 +typedef atomic64_t atomic64_unchecked_t;
73683 +
73684 #define ATOMIC64_INIT(i) { (i) }
73685
73686 extern long long atomic64_read(const atomic64_t *v);
73687 @@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
73688 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
73689 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
73690
73691 +#define atomic64_read_unchecked(v) atomic64_read(v)
73692 +#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
73693 +#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
73694 +#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
73695 +#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
73696 +#define atomic64_inc_unchecked(v) atomic64_inc(v)
73697 +#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
73698 +#define atomic64_dec_unchecked(v) atomic64_dec(v)
73699 +#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
73700 +
73701 #endif /* _ASM_GENERIC_ATOMIC64_H */
73702 diff --git a/include/asm-generic/bitops/__fls.h b/include/asm-generic/bitops/__fls.h
73703 index a60a7cc..0fe12f2 100644
73704 --- a/include/asm-generic/bitops/__fls.h
73705 +++ b/include/asm-generic/bitops/__fls.h
73706 @@ -9,7 +9,7 @@
73707 *
73708 * Undefined if no set bit exists, so code should check against 0 first.
73709 */
73710 -static __always_inline unsigned long __fls(unsigned long word)
73711 +static __always_inline unsigned long __intentional_overflow(-1) __fls(unsigned long word)
73712 {
73713 int num = BITS_PER_LONG - 1;
73714
73715 diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h
73716 index 0576d1f..dad6c71 100644
73717 --- a/include/asm-generic/bitops/fls.h
73718 +++ b/include/asm-generic/bitops/fls.h
73719 @@ -9,7 +9,7 @@
73720 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
73721 */
73722
73723 -static __always_inline int fls(int x)
73724 +static __always_inline int __intentional_overflow(-1) fls(int x)
73725 {
73726 int r = 32;
73727
73728 diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h
73729 index b097cf8..3d40e14 100644
73730 --- a/include/asm-generic/bitops/fls64.h
73731 +++ b/include/asm-generic/bitops/fls64.h
73732 @@ -15,7 +15,7 @@
73733 * at position 64.
73734 */
73735 #if BITS_PER_LONG == 32
73736 -static __always_inline int fls64(__u64 x)
73737 +static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
73738 {
73739 __u32 h = x >> 32;
73740 if (h)
73741 @@ -23,7 +23,7 @@ static __always_inline int fls64(__u64 x)
73742 return fls(x);
73743 }
73744 #elif BITS_PER_LONG == 64
73745 -static __always_inline int fls64(__u64 x)
73746 +static __always_inline int __intentional_overflow(-1) fls64(__u64 x)
73747 {
73748 if (x == 0)
73749 return 0;
73750 diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
73751 index 1bfcfe5..e04c5c9 100644
73752 --- a/include/asm-generic/cache.h
73753 +++ b/include/asm-generic/cache.h
73754 @@ -6,7 +6,7 @@
73755 * cache lines need to provide their own cache.h.
73756 */
73757
73758 -#define L1_CACHE_SHIFT 5
73759 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
73760 +#define L1_CACHE_SHIFT 5UL
73761 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
73762
73763 #endif /* __ASM_GENERIC_CACHE_H */
73764 diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
73765 index 0d68a1e..b74a761 100644
73766 --- a/include/asm-generic/emergency-restart.h
73767 +++ b/include/asm-generic/emergency-restart.h
73768 @@ -1,7 +1,7 @@
73769 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
73770 #define _ASM_GENERIC_EMERGENCY_RESTART_H
73771
73772 -static inline void machine_emergency_restart(void)
73773 +static inline __noreturn void machine_emergency_restart(void)
73774 {
73775 machine_restart(NULL);
73776 }
73777 diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
73778 index 90f99c7..00ce236 100644
73779 --- a/include/asm-generic/kmap_types.h
73780 +++ b/include/asm-generic/kmap_types.h
73781 @@ -2,9 +2,9 @@
73782 #define _ASM_GENERIC_KMAP_TYPES_H
73783
73784 #ifdef __WITH_KM_FENCE
73785 -# define KM_TYPE_NR 41
73786 +# define KM_TYPE_NR 42
73787 #else
73788 -# define KM_TYPE_NR 20
73789 +# define KM_TYPE_NR 21
73790 #endif
73791
73792 #endif
73793 diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
73794 index 9ceb03b..62b0b8f 100644
73795 --- a/include/asm-generic/local.h
73796 +++ b/include/asm-generic/local.h
73797 @@ -23,24 +23,37 @@ typedef struct
73798 atomic_long_t a;
73799 } local_t;
73800
73801 +typedef struct {
73802 + atomic_long_unchecked_t a;
73803 +} local_unchecked_t;
73804 +
73805 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
73806
73807 #define local_read(l) atomic_long_read(&(l)->a)
73808 +#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
73809 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
73810 +#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
73811 #define local_inc(l) atomic_long_inc(&(l)->a)
73812 +#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
73813 #define local_dec(l) atomic_long_dec(&(l)->a)
73814 +#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
73815 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
73816 +#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
73817 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
73818 +#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
73819
73820 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
73821 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
73822 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
73823 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
73824 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
73825 +#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
73826 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
73827 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
73828 +#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
73829
73830 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
73831 +#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
73832 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
73833 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
73834 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
73835 diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
73836 index 725612b..9cc513a 100644
73837 --- a/include/asm-generic/pgtable-nopmd.h
73838 +++ b/include/asm-generic/pgtable-nopmd.h
73839 @@ -1,14 +1,19 @@
73840 #ifndef _PGTABLE_NOPMD_H
73841 #define _PGTABLE_NOPMD_H
73842
73843 -#ifndef __ASSEMBLY__
73844 -
73845 #include <asm-generic/pgtable-nopud.h>
73846
73847 -struct mm_struct;
73848 -
73849 #define __PAGETABLE_PMD_FOLDED
73850
73851 +#define PMD_SHIFT PUD_SHIFT
73852 +#define PTRS_PER_PMD 1
73853 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
73854 +#define PMD_MASK (~(PMD_SIZE-1))
73855 +
73856 +#ifndef __ASSEMBLY__
73857 +
73858 +struct mm_struct;
73859 +
73860 /*
73861 * Having the pmd type consist of a pud gets the size right, and allows
73862 * us to conceptually access the pud entry that this pmd is folded into
73863 @@ -16,11 +21,6 @@ struct mm_struct;
73864 */
73865 typedef struct { pud_t pud; } pmd_t;
73866
73867 -#define PMD_SHIFT PUD_SHIFT
73868 -#define PTRS_PER_PMD 1
73869 -#define PMD_SIZE (1UL << PMD_SHIFT)
73870 -#define PMD_MASK (~(PMD_SIZE-1))
73871 -
73872 /*
73873 * The "pud_xxx()" functions here are trivial for a folded two-level
73874 * setup: the pmd is never bad, and a pmd always exists (as it's folded
73875 diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
73876 index 810431d..0ec4804f 100644
73877 --- a/include/asm-generic/pgtable-nopud.h
73878 +++ b/include/asm-generic/pgtable-nopud.h
73879 @@ -1,10 +1,15 @@
73880 #ifndef _PGTABLE_NOPUD_H
73881 #define _PGTABLE_NOPUD_H
73882
73883 -#ifndef __ASSEMBLY__
73884 -
73885 #define __PAGETABLE_PUD_FOLDED
73886
73887 +#define PUD_SHIFT PGDIR_SHIFT
73888 +#define PTRS_PER_PUD 1
73889 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
73890 +#define PUD_MASK (~(PUD_SIZE-1))
73891 +
73892 +#ifndef __ASSEMBLY__
73893 +
73894 /*
73895 * Having the pud type consist of a pgd gets the size right, and allows
73896 * us to conceptually access the pgd entry that this pud is folded into
73897 @@ -12,11 +17,6 @@
73898 */
73899 typedef struct { pgd_t pgd; } pud_t;
73900
73901 -#define PUD_SHIFT PGDIR_SHIFT
73902 -#define PTRS_PER_PUD 1
73903 -#define PUD_SIZE (1UL << PUD_SHIFT)
73904 -#define PUD_MASK (~(PUD_SIZE-1))
73905 -
73906 /*
73907 * The "pgd_xxx()" functions here are trivial for a folded two-level
73908 * setup: the pud is never bad, and a pud always exists (as it's folded
73909 @@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
73910 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
73911
73912 #define pgd_populate(mm, pgd, pud) do { } while (0)
73913 +#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
73914 /*
73915 * (puds are folded into pgds so this doesn't get actually called,
73916 * but the define is needed for a generic inline function.)
73917 diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
73918 index f330d28..d93bba9 100644
73919 --- a/include/asm-generic/pgtable.h
73920 +++ b/include/asm-generic/pgtable.h
73921 @@ -599,11 +599,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
73922 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
73923 barrier();
73924 #endif
73925 - if (pmd_none(pmdval))
73926 + if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
73927 return 1;
73928 if (unlikely(pmd_bad(pmdval))) {
73929 - if (!pmd_trans_huge(pmdval))
73930 - pmd_clear_bad(pmd);
73931 + pmd_clear_bad(pmd);
73932 return 1;
73933 }
73934 return 0;
73935 @@ -737,6 +736,22 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
73936 }
73937 #endif /* CONFIG_NUMA_BALANCING */
73938
73939 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
73940 +#ifdef CONFIG_PAX_KERNEXEC
73941 +#error KERNEXEC requires pax_open_kernel
73942 +#else
73943 +static inline unsigned long pax_open_kernel(void) { return 0; }
73944 +#endif
73945 +#endif
73946 +
73947 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
73948 +#ifdef CONFIG_PAX_KERNEXEC
73949 +#error KERNEXEC requires pax_close_kernel
73950 +#else
73951 +static inline unsigned long pax_close_kernel(void) { return 0; }
73952 +#endif
73953 +#endif
73954 +
73955 #endif /* CONFIG_MMU */
73956
73957 #endif /* !__ASSEMBLY__ */
73958 diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
73959 index dc1269c..48a4f51 100644
73960 --- a/include/asm-generic/uaccess.h
73961 +++ b/include/asm-generic/uaccess.h
73962 @@ -343,4 +343,20 @@ clear_user(void __user *to, unsigned long n)
73963 return __clear_user(to, n);
73964 }
73965
73966 +#ifndef __HAVE_ARCH_PAX_OPEN_USERLAND
73967 +#ifdef CONFIG_PAX_MEMORY_UDEREF
73968 +#error UDEREF requires pax_open_userland
73969 +#else
73970 +static inline unsigned long pax_open_userland(void) { return 0; }
73971 +#endif
73972 +#endif
73973 +
73974 +#ifndef __HAVE_ARCH_PAX_CLOSE_USERLAND
73975 +#ifdef CONFIG_PAX_MEMORY_UDEREF
73976 +#error UDEREF requires pax_close_userland
73977 +#else
73978 +static inline unsigned long pax_close_userland(void) { return 0; }
73979 +#endif
73980 +#endif
73981 +
73982 #endif /* __ASM_GENERIC_UACCESS_H */
73983 diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
73984 index 83e2c31..eeb4a04 100644
73985 --- a/include/asm-generic/vmlinux.lds.h
73986 +++ b/include/asm-generic/vmlinux.lds.h
73987 @@ -232,6 +232,7 @@
73988 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
73989 VMLINUX_SYMBOL(__start_rodata) = .; \
73990 *(.rodata) *(.rodata.*) \
73991 + *(.data..read_only) \
73992 *(__vermagic) /* Kernel version magic */ \
73993 . = ALIGN(8); \
73994 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
73995 @@ -715,17 +716,18 @@
73996 * section in the linker script will go there too. @phdr should have
73997 * a leading colon.
73998 *
73999 - * Note that this macros defines __per_cpu_load as an absolute symbol.
74000 + * Note that this macros defines per_cpu_load as an absolute symbol.
74001 * If there is no need to put the percpu section at a predetermined
74002 * address, use PERCPU_SECTION.
74003 */
74004 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
74005 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
74006 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
74007 + per_cpu_load = .; \
74008 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
74009 - LOAD_OFFSET) { \
74010 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
74011 PERCPU_INPUT(cacheline) \
74012 } phdr \
74013 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
74014 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
74015
74016 /**
74017 * PERCPU_SECTION - define output section for percpu area, simple version
74018 diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
74019 index 418d270..bfd2794 100644
74020 --- a/include/crypto/algapi.h
74021 +++ b/include/crypto/algapi.h
74022 @@ -34,7 +34,7 @@ struct crypto_type {
74023 unsigned int maskclear;
74024 unsigned int maskset;
74025 unsigned int tfmsize;
74026 -};
74027 +} __do_const;
74028
74029 struct crypto_instance {
74030 struct crypto_alg alg;
74031 diff --git a/include/drm/drmP.h b/include/drm/drmP.h
74032 index b46fb45..b30d6d5 100644
74033 --- a/include/drm/drmP.h
74034 +++ b/include/drm/drmP.h
74035 @@ -66,6 +66,7 @@
74036 #include <linux/workqueue.h>
74037 #include <linux/poll.h>
74038 #include <asm/pgalloc.h>
74039 +#include <asm/local.h>
74040 #include <drm/drm.h>
74041 #include <drm/drm_sarea.h>
74042 #include <drm/drm_vma_manager.h>
74043 @@ -277,10 +278,12 @@ do { \
74044 * \param cmd command.
74045 * \param arg argument.
74046 */
74047 -typedef int drm_ioctl_t(struct drm_device *dev, void *data,
74048 +typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
74049 + struct drm_file *file_priv);
74050 +typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
74051 struct drm_file *file_priv);
74052
74053 -typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
74054 +typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
74055 unsigned long arg);
74056
74057 #define DRM_IOCTL_NR(n) _IOC_NR(n)
74058 @@ -296,10 +299,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
74059 struct drm_ioctl_desc {
74060 unsigned int cmd;
74061 int flags;
74062 - drm_ioctl_t *func;
74063 + drm_ioctl_t func;
74064 unsigned int cmd_drv;
74065 const char *name;
74066 -};
74067 +} __do_const;
74068
74069 /**
74070 * Creates a driver or general drm_ioctl_desc array entry for the given
74071 @@ -1027,7 +1030,7 @@ struct drm_info_list {
74072 int (*show)(struct seq_file*, void*); /** show callback */
74073 u32 driver_features; /**< Required driver features for this entry */
74074 void *data;
74075 -};
74076 +} __do_const;
74077
74078 /**
74079 * debugfs node structure. This structure represents a debugfs file.
74080 @@ -1098,7 +1101,7 @@ struct drm_device {
74081
74082 /** \name Usage Counters */
74083 /*@{ */
74084 - int open_count; /**< Outstanding files open */
74085 + local_t open_count; /**< Outstanding files open */
74086 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
74087 atomic_t vma_count; /**< Outstanding vma areas open */
74088 int buf_use; /**< Buffers in use -- cannot alloc */
74089 @@ -1109,7 +1112,7 @@ struct drm_device {
74090 /*@{ */
74091 unsigned long counters;
74092 enum drm_stat_type types[15];
74093 - atomic_t counts[15];
74094 + atomic_unchecked_t counts[15];
74095 /*@} */
74096
74097 struct list_head filelist;
74098 diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
74099 index f43d556..94d9343 100644
74100 --- a/include/drm/drm_crtc_helper.h
74101 +++ b/include/drm/drm_crtc_helper.h
74102 @@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
74103 struct drm_connector *connector);
74104 /* disable encoder when not in use - more explicit than dpms off */
74105 void (*disable)(struct drm_encoder *encoder);
74106 -};
74107 +} __no_const;
74108
74109 /**
74110 * drm_connector_helper_funcs - helper operations for connectors
74111 diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
74112 index 8a10f5c..5de7f5c 100644
74113 --- a/include/drm/i915_pciids.h
74114 +++ b/include/drm/i915_pciids.h
74115 @@ -37,7 +37,7 @@
74116 */
74117 #define INTEL_VGA_DEVICE(id, info) { \
74118 0x8086, id, \
74119 - ~0, ~0, \
74120 + PCI_ANY_ID, PCI_ANY_ID, \
74121 0x030000, 0xff0000, \
74122 (unsigned long) info }
74123
74124 diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
74125 index 72dcbe8..8db58d7 100644
74126 --- a/include/drm/ttm/ttm_memory.h
74127 +++ b/include/drm/ttm/ttm_memory.h
74128 @@ -48,7 +48,7 @@
74129
74130 struct ttm_mem_shrink {
74131 int (*do_shrink) (struct ttm_mem_shrink *);
74132 -};
74133 +} __no_const;
74134
74135 /**
74136 * struct ttm_mem_global - Global memory accounting structure.
74137 diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
74138 index 4b840e8..155d235 100644
74139 --- a/include/keys/asymmetric-subtype.h
74140 +++ b/include/keys/asymmetric-subtype.h
74141 @@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
74142 /* Verify the signature on a key of this subtype (optional) */
74143 int (*verify_signature)(const struct key *key,
74144 const struct public_key_signature *sig);
74145 -};
74146 +} __do_const;
74147
74148 /**
74149 * asymmetric_key_subtype - Get the subtype from an asymmetric key
74150 diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
74151 index c1da539..1dcec55 100644
74152 --- a/include/linux/atmdev.h
74153 +++ b/include/linux/atmdev.h
74154 @@ -28,7 +28,7 @@ struct compat_atm_iobuf {
74155 #endif
74156
74157 struct k_atm_aal_stats {
74158 -#define __HANDLE_ITEM(i) atomic_t i
74159 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
74160 __AAL_STAT_ITEMS
74161 #undef __HANDLE_ITEM
74162 };
74163 @@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
74164 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
74165 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
74166 struct module *owner;
74167 -};
74168 +} __do_const ;
74169
74170 struct atmphy_ops {
74171 int (*start)(struct atm_dev *dev);
74172 diff --git a/include/linux/audit.h b/include/linux/audit.h
74173 index 729a4d1..9b304ae 100644
74174 --- a/include/linux/audit.h
74175 +++ b/include/linux/audit.h
74176 @@ -193,7 +193,7 @@ static inline void audit_ptrace(struct task_struct *t)
74177 extern unsigned int audit_serial(void);
74178 extern int auditsc_get_stamp(struct audit_context *ctx,
74179 struct timespec *t, unsigned int *serial);
74180 -extern int audit_set_loginuid(kuid_t loginuid);
74181 +extern int __intentional_overflow(-1) audit_set_loginuid(kuid_t loginuid);
74182
74183 static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
74184 {
74185 diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
74186 index 669fef5..3e0fbe4 100644
74187 --- a/include/linux/auxvec.h
74188 +++ b/include/linux/auxvec.h
74189 @@ -3,6 +3,6 @@
74190
74191 #include <uapi/linux/auxvec.h>
74192
74193 -#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */
74194 +#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
74195 /* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
74196 #endif /* _LINUX_AUXVEC_H */
74197 diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
74198 index 7554fd4..0f86379 100644
74199 --- a/include/linux/binfmts.h
74200 +++ b/include/linux/binfmts.h
74201 @@ -73,8 +73,10 @@ struct linux_binfmt {
74202 int (*load_binary)(struct linux_binprm *);
74203 int (*load_shlib)(struct file *);
74204 int (*core_dump)(struct coredump_params *cprm);
74205 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
74206 + void (*handle_mmap)(struct file *);
74207 unsigned long min_coredump; /* minimal dump size */
74208 -};
74209 +} __do_const;
74210
74211 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
74212
74213 diff --git a/include/linux/bitops.h b/include/linux/bitops.h
74214 index a3b6b82..2a7d758 100644
74215 --- a/include/linux/bitops.h
74216 +++ b/include/linux/bitops.h
74217 @@ -91,7 +91,7 @@ static inline __u64 ror64(__u64 word, unsigned int shift)
74218 * @word: value to rotate
74219 * @shift: bits to roll
74220 */
74221 -static inline __u32 rol32(__u32 word, unsigned int shift)
74222 +static inline __u32 __intentional_overflow(-1) rol32(__u32 word, unsigned int shift)
74223 {
74224 return (word << shift) | (word >> (32 - shift));
74225 }
74226 @@ -101,7 +101,7 @@ static inline __u32 rol32(__u32 word, unsigned int shift)
74227 * @word: value to rotate
74228 * @shift: bits to roll
74229 */
74230 -static inline __u32 ror32(__u32 word, unsigned int shift)
74231 +static inline __u32 __intentional_overflow(-1) ror32(__u32 word, unsigned int shift)
74232 {
74233 return (word >> shift) | (word << (32 - shift));
74234 }
74235 @@ -157,7 +157,7 @@ static inline __s32 sign_extend32(__u32 value, int index)
74236 return (__s32)(value << shift) >> shift;
74237 }
74238
74239 -static inline unsigned fls_long(unsigned long l)
74240 +static inline unsigned __intentional_overflow(-1) fls_long(unsigned long l)
74241 {
74242 if (sizeof(l) == 4)
74243 return fls(l);
74244 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
74245 index 0e6f765..885bb2b 100644
74246 --- a/include/linux/blkdev.h
74247 +++ b/include/linux/blkdev.h
74248 @@ -1537,7 +1537,7 @@ struct block_device_operations {
74249 /* this callback is with swap_lock and sometimes page table lock held */
74250 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
74251 struct module *owner;
74252 -};
74253 +} __do_const;
74254
74255 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
74256 unsigned long);
74257 diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
74258 index 7c2e030..b72475d 100644
74259 --- a/include/linux/blktrace_api.h
74260 +++ b/include/linux/blktrace_api.h
74261 @@ -23,7 +23,7 @@ struct blk_trace {
74262 struct dentry *dir;
74263 struct dentry *dropped_file;
74264 struct dentry *msg_file;
74265 - atomic_t dropped;
74266 + atomic_unchecked_t dropped;
74267 };
74268
74269 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
74270 diff --git a/include/linux/cache.h b/include/linux/cache.h
74271 index 4c57065..40346da 100644
74272 --- a/include/linux/cache.h
74273 +++ b/include/linux/cache.h
74274 @@ -16,6 +16,14 @@
74275 #define __read_mostly
74276 #endif
74277
74278 +#ifndef __read_only
74279 +#ifdef CONFIG_PAX_KERNEXEC
74280 +#error KERNEXEC requires __read_only
74281 +#else
74282 +#define __read_only __read_mostly
74283 +#endif
74284 +#endif
74285 +
74286 #ifndef ____cacheline_aligned
74287 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
74288 #endif
74289 diff --git a/include/linux/capability.h b/include/linux/capability.h
74290 index a6ee1f9..e1ca49d 100644
74291 --- a/include/linux/capability.h
74292 +++ b/include/linux/capability.h
74293 @@ -212,8 +212,13 @@ extern bool capable(int cap);
74294 extern bool ns_capable(struct user_namespace *ns, int cap);
74295 extern bool inode_capable(const struct inode *inode, int cap);
74296 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
74297 +extern bool capable_nolog(int cap);
74298 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
74299 +extern bool inode_capable_nolog(const struct inode *inode, int cap);
74300
74301 /* audit system wants to get cap info from files as well */
74302 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
74303
74304 +extern int is_privileged_binary(const struct dentry *dentry);
74305 +
74306 #endif /* !_LINUX_CAPABILITY_H */
74307 diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
74308 index 8609d57..86e4d79 100644
74309 --- a/include/linux/cdrom.h
74310 +++ b/include/linux/cdrom.h
74311 @@ -87,7 +87,6 @@ struct cdrom_device_ops {
74312
74313 /* driver specifications */
74314 const int capability; /* capability flags */
74315 - int n_minors; /* number of active minor devices */
74316 /* handle uniform packets for scsi type devices (scsi,atapi) */
74317 int (*generic_packet) (struct cdrom_device_info *,
74318 struct packet_command *);
74319 diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
74320 index 4ce9056..86caac6 100644
74321 --- a/include/linux/cleancache.h
74322 +++ b/include/linux/cleancache.h
74323 @@ -31,7 +31,7 @@ struct cleancache_ops {
74324 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
74325 void (*invalidate_inode)(int, struct cleancache_filekey);
74326 void (*invalidate_fs)(int);
74327 -};
74328 +} __no_const;
74329
74330 extern struct cleancache_ops *
74331 cleancache_register_ops(struct cleancache_ops *ops);
74332 diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
74333 index 73bdb69..d66d47a 100644
74334 --- a/include/linux/clk-provider.h
74335 +++ b/include/linux/clk-provider.h
74336 @@ -141,6 +141,7 @@ struct clk_ops {
74337 unsigned long);
74338 void (*init)(struct clk_hw *hw);
74339 };
74340 +typedef struct clk_ops __no_const clk_ops_no_const;
74341
74342 /**
74343 * struct clk_init_data - holds init data that's common to all clocks and is
74344 diff --git a/include/linux/compat.h b/include/linux/compat.h
74345 index 345da00..b6eff26 100644
74346 --- a/include/linux/compat.h
74347 +++ b/include/linux/compat.h
74348 @@ -313,7 +313,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
74349 compat_size_t __user *len_ptr);
74350
74351 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
74352 -asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
74353 +asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
74354 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
74355 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
74356 compat_ssize_t msgsz, int msgflg);
74357 @@ -420,7 +420,7 @@ extern int compat_ptrace_request(struct task_struct *child,
74358 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
74359 compat_ulong_t addr, compat_ulong_t data);
74360 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
74361 - compat_long_t addr, compat_long_t data);
74362 + compat_ulong_t addr, compat_ulong_t data);
74363
74364 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
74365 /*
74366 diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
74367 index ded4299..da50e3b 100644
74368 --- a/include/linux/compiler-gcc4.h
74369 +++ b/include/linux/compiler-gcc4.h
74370 @@ -39,9 +39,29 @@
74371 # define __compiletime_warning(message) __attribute__((warning(message)))
74372 # define __compiletime_error(message) __attribute__((error(message)))
74373 #endif /* __CHECKER__ */
74374 +
74375 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
74376 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
74377 +#define __bos0(ptr) __bos((ptr), 0)
74378 +#define __bos1(ptr) __bos((ptr), 1)
74379 #endif /* GCC_VERSION >= 40300 */
74380
74381 #if GCC_VERSION >= 40500
74382 +
74383 +#ifdef CONSTIFY_PLUGIN
74384 +#define __no_const __attribute__((no_const))
74385 +#define __do_const __attribute__((do_const))
74386 +#endif
74387 +
74388 +#ifdef SIZE_OVERFLOW_PLUGIN
74389 +#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
74390 +#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
74391 +#endif
74392 +
74393 +#ifdef LATENT_ENTROPY_PLUGIN
74394 +#define __latent_entropy __attribute__((latent_entropy))
74395 +#endif
74396 +
74397 /*
74398 * Mark a position in code as unreachable. This can be used to
74399 * suppress control flow warnings after asm blocks that transfer
74400 diff --git a/include/linux/compiler.h b/include/linux/compiler.h
74401 index 92669cd..1771a15 100644
74402 --- a/include/linux/compiler.h
74403 +++ b/include/linux/compiler.h
74404 @@ -5,11 +5,14 @@
74405
74406 #ifdef __CHECKER__
74407 # define __user __attribute__((noderef, address_space(1)))
74408 +# define __force_user __force __user
74409 # define __kernel __attribute__((address_space(0)))
74410 +# define __force_kernel __force __kernel
74411 # define __safe __attribute__((safe))
74412 # define __force __attribute__((force))
74413 # define __nocast __attribute__((nocast))
74414 # define __iomem __attribute__((noderef, address_space(2)))
74415 +# define __force_iomem __force __iomem
74416 # define __must_hold(x) __attribute__((context(x,1,1)))
74417 # define __acquires(x) __attribute__((context(x,0,1)))
74418 # define __releases(x) __attribute__((context(x,1,0)))
74419 @@ -17,20 +20,37 @@
74420 # define __release(x) __context__(x,-1)
74421 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
74422 # define __percpu __attribute__((noderef, address_space(3)))
74423 +# define __force_percpu __force __percpu
74424 #ifdef CONFIG_SPARSE_RCU_POINTER
74425 # define __rcu __attribute__((noderef, address_space(4)))
74426 +# define __force_rcu __force __rcu
74427 #else
74428 # define __rcu
74429 +# define __force_rcu
74430 #endif
74431 extern void __chk_user_ptr(const volatile void __user *);
74432 extern void __chk_io_ptr(const volatile void __iomem *);
74433 #else
74434 -# define __user
74435 -# define __kernel
74436 +# ifdef CHECKER_PLUGIN
74437 +//# define __user
74438 +//# define __force_user
74439 +//# define __kernel
74440 +//# define __force_kernel
74441 +# else
74442 +# ifdef STRUCTLEAK_PLUGIN
74443 +# define __user __attribute__((user))
74444 +# else
74445 +# define __user
74446 +# endif
74447 +# define __force_user
74448 +# define __kernel
74449 +# define __force_kernel
74450 +# endif
74451 # define __safe
74452 # define __force
74453 # define __nocast
74454 # define __iomem
74455 +# define __force_iomem
74456 # define __chk_user_ptr(x) (void)0
74457 # define __chk_io_ptr(x) (void)0
74458 # define __builtin_warning(x, y...) (1)
74459 @@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
74460 # define __release(x) (void)0
74461 # define __cond_lock(x,c) (c)
74462 # define __percpu
74463 +# define __force_percpu
74464 # define __rcu
74465 +# define __force_rcu
74466 #endif
74467
74468 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
74469 @@ -275,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
74470 # define __attribute_const__ /* unimplemented */
74471 #endif
74472
74473 +#ifndef __no_const
74474 +# define __no_const
74475 +#endif
74476 +
74477 +#ifndef __do_const
74478 +# define __do_const
74479 +#endif
74480 +
74481 +#ifndef __size_overflow
74482 +# define __size_overflow(...)
74483 +#endif
74484 +
74485 +#ifndef __intentional_overflow
74486 +# define __intentional_overflow(...)
74487 +#endif
74488 +
74489 +#ifndef __latent_entropy
74490 +# define __latent_entropy
74491 +#endif
74492 +
74493 /*
74494 * Tell gcc if a function is cold. The compiler will assume any path
74495 * directly leading to the call is unlikely.
74496 @@ -284,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
74497 #define __cold
74498 #endif
74499
74500 +#ifndef __alloc_size
74501 +#define __alloc_size(...)
74502 +#endif
74503 +
74504 +#ifndef __bos
74505 +#define __bos(ptr, arg)
74506 +#endif
74507 +
74508 +#ifndef __bos0
74509 +#define __bos0(ptr)
74510 +#endif
74511 +
74512 +#ifndef __bos1
74513 +#define __bos1(ptr)
74514 +#endif
74515 +
74516 /* Simple shorthand for a section definition */
74517 #ifndef __section
74518 # define __section(S) __attribute__ ((__section__(#S)))
74519 @@ -349,7 +407,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
74520 * use is to mediate communication between process-level code and irq/NMI
74521 * handlers, all running on the same CPU.
74522 */
74523 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
74524 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
74525 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
74526
74527 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
74528 #ifdef CONFIG_KPROBES
74529 diff --git a/include/linux/completion.h b/include/linux/completion.h
74530 index 3cd574d..240dcb0 100644
74531 --- a/include/linux/completion.h
74532 +++ b/include/linux/completion.h
74533 @@ -78,16 +78,16 @@ static inline void init_completion(struct completion *x)
74534
74535 extern void wait_for_completion(struct completion *);
74536 extern void wait_for_completion_io(struct completion *);
74537 -extern int wait_for_completion_interruptible(struct completion *x);
74538 -extern int wait_for_completion_killable(struct completion *x);
74539 +extern int wait_for_completion_interruptible(struct completion *x) __intentional_overflow(-1);
74540 +extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
74541 extern unsigned long wait_for_completion_timeout(struct completion *x,
74542 - unsigned long timeout);
74543 + unsigned long timeout) __intentional_overflow(-1);
74544 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
74545 - unsigned long timeout);
74546 + unsigned long timeout) __intentional_overflow(-1);
74547 extern long wait_for_completion_interruptible_timeout(
74548 - struct completion *x, unsigned long timeout);
74549 + struct completion *x, unsigned long timeout) __intentional_overflow(-1);
74550 extern long wait_for_completion_killable_timeout(
74551 - struct completion *x, unsigned long timeout);
74552 + struct completion *x, unsigned long timeout) __intentional_overflow(-1);
74553 extern bool try_wait_for_completion(struct completion *x);
74554 extern bool completion_done(struct completion *x);
74555
74556 diff --git a/include/linux/configfs.h b/include/linux/configfs.h
74557 index 34025df..d94bbbc 100644
74558 --- a/include/linux/configfs.h
74559 +++ b/include/linux/configfs.h
74560 @@ -125,7 +125,7 @@ struct configfs_attribute {
74561 const char *ca_name;
74562 struct module *ca_owner;
74563 umode_t ca_mode;
74564 -};
74565 +} __do_const;
74566
74567 /*
74568 * Users often need to create attribute structures for their configurable
74569 diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
74570 index fcabc42..cba5d93 100644
74571 --- a/include/linux/cpufreq.h
74572 +++ b/include/linux/cpufreq.h
74573 @@ -167,6 +167,7 @@ struct global_attr {
74574 ssize_t (*store)(struct kobject *a, struct attribute *b,
74575 const char *c, size_t count);
74576 };
74577 +typedef struct global_attr __no_const global_attr_no_const;
74578
74579 #define define_one_global_ro(_name) \
74580 static struct global_attr _name = \
74581 @@ -208,7 +209,7 @@ struct cpufreq_driver {
74582 int (*suspend) (struct cpufreq_policy *policy);
74583 int (*resume) (struct cpufreq_policy *policy);
74584 struct freq_attr **attr;
74585 -};
74586 +} __do_const;
74587
74588 /* flags */
74589 #define CPUFREQ_STICKY 0x01 /* the driver isn't removed even if
74590 diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
74591 index 781addc..d1e1fe6 100644
74592 --- a/include/linux/cpuidle.h
74593 +++ b/include/linux/cpuidle.h
74594 @@ -50,7 +50,8 @@ struct cpuidle_state {
74595 int index);
74596
74597 int (*enter_dead) (struct cpuidle_device *dev, int index);
74598 -};
74599 +} __do_const;
74600 +typedef struct cpuidle_state __no_const cpuidle_state_no_const;
74601
74602 /* Idle State Flags */
74603 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
74604 @@ -192,7 +193,7 @@ struct cpuidle_governor {
74605 void (*reflect) (struct cpuidle_device *dev, int index);
74606
74607 struct module *owner;
74608 -};
74609 +} __do_const;
74610
74611 #ifdef CONFIG_CPU_IDLE
74612
74613 diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
74614 index d08e4d2..95fad61 100644
74615 --- a/include/linux/cpumask.h
74616 +++ b/include/linux/cpumask.h
74617 @@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
74618 }
74619
74620 /* Valid inputs for n are -1 and 0. */
74621 -static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
74622 +static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
74623 {
74624 return n+1;
74625 }
74626
74627 -static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
74628 +static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
74629 {
74630 return n+1;
74631 }
74632
74633 -static inline unsigned int cpumask_next_and(int n,
74634 +static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
74635 const struct cpumask *srcp,
74636 const struct cpumask *andp)
74637 {
74638 @@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
74639 *
74640 * Returns >= nr_cpu_ids if no further cpus set.
74641 */
74642 -static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
74643 +static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
74644 {
74645 /* -1 is a legal arg here. */
74646 if (n != -1)
74647 @@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
74648 *
74649 * Returns >= nr_cpu_ids if no further cpus unset.
74650 */
74651 -static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
74652 +static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
74653 {
74654 /* -1 is a legal arg here. */
74655 if (n != -1)
74656 @@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
74657 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
74658 }
74659
74660 -int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
74661 +int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
74662 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
74663
74664 /**
74665 diff --git a/include/linux/cred.h b/include/linux/cred.h
74666 index 04421e8..6bce4ef 100644
74667 --- a/include/linux/cred.h
74668 +++ b/include/linux/cred.h
74669 @@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
74670 static inline void validate_process_creds(void)
74671 {
74672 }
74673 +static inline void validate_task_creds(struct task_struct *task)
74674 +{
74675 +}
74676 #endif
74677
74678 /**
74679 diff --git a/include/linux/crypto.h b/include/linux/crypto.h
74680 index b92eadf..b4ecdc1 100644
74681 --- a/include/linux/crypto.h
74682 +++ b/include/linux/crypto.h
74683 @@ -373,7 +373,7 @@ struct cipher_tfm {
74684 const u8 *key, unsigned int keylen);
74685 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
74686 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
74687 -};
74688 +} __no_const;
74689
74690 struct hash_tfm {
74691 int (*init)(struct hash_desc *desc);
74692 @@ -394,13 +394,13 @@ struct compress_tfm {
74693 int (*cot_decompress)(struct crypto_tfm *tfm,
74694 const u8 *src, unsigned int slen,
74695 u8 *dst, unsigned int *dlen);
74696 -};
74697 +} __no_const;
74698
74699 struct rng_tfm {
74700 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
74701 unsigned int dlen);
74702 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
74703 -};
74704 +} __no_const;
74705
74706 #define crt_ablkcipher crt_u.ablkcipher
74707 #define crt_aead crt_u.aead
74708 diff --git a/include/linux/ctype.h b/include/linux/ctype.h
74709 index 653589e..4ef254a 100644
74710 --- a/include/linux/ctype.h
74711 +++ b/include/linux/ctype.h
74712 @@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
74713 * Fast implementation of tolower() for internal usage. Do not use in your
74714 * code.
74715 */
74716 -static inline char _tolower(const char c)
74717 +static inline unsigned char _tolower(const unsigned char c)
74718 {
74719 return c | 0x20;
74720 }
74721 diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
74722 index 7925bf0..d5143d2 100644
74723 --- a/include/linux/decompress/mm.h
74724 +++ b/include/linux/decompress/mm.h
74725 @@ -77,7 +77,7 @@ static void free(void *where)
74726 * warnings when not needed (indeed large_malloc / large_free are not
74727 * needed by inflate */
74728
74729 -#define malloc(a) kmalloc(a, GFP_KERNEL)
74730 +#define malloc(a) kmalloc((a), GFP_KERNEL)
74731 #define free(a) kfree(a)
74732
74733 #define large_malloc(a) vmalloc(a)
74734 diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
74735 index 5f1ab92..39c35ae 100644
74736 --- a/include/linux/devfreq.h
74737 +++ b/include/linux/devfreq.h
74738 @@ -114,7 +114,7 @@ struct devfreq_governor {
74739 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
74740 int (*event_handler)(struct devfreq *devfreq,
74741 unsigned int event, void *data);
74742 -};
74743 +} __do_const;
74744
74745 /**
74746 * struct devfreq - Device devfreq structure
74747 diff --git a/include/linux/device.h b/include/linux/device.h
74748 index 2a9d6ed..d14551e3 100644
74749 --- a/include/linux/device.h
74750 +++ b/include/linux/device.h
74751 @@ -313,7 +313,7 @@ struct subsys_interface {
74752 struct list_head node;
74753 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
74754 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
74755 -};
74756 +} __do_const;
74757
74758 int subsys_interface_register(struct subsys_interface *sif);
74759 void subsys_interface_unregister(struct subsys_interface *sif);
74760 @@ -501,7 +501,7 @@ struct device_type {
74761 void (*release)(struct device *dev);
74762
74763 const struct dev_pm_ops *pm;
74764 -};
74765 +} __do_const;
74766
74767 /* interface for exporting device attributes */
74768 struct device_attribute {
74769 @@ -511,11 +511,12 @@ struct device_attribute {
74770 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
74771 const char *buf, size_t count);
74772 };
74773 +typedef struct device_attribute __no_const device_attribute_no_const;
74774
74775 struct dev_ext_attribute {
74776 struct device_attribute attr;
74777 void *var;
74778 -};
74779 +} __do_const;
74780
74781 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
74782 char *buf);
74783 diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
74784 index 3a8d0a2..c762be2 100644
74785 --- a/include/linux/dma-mapping.h
74786 +++ b/include/linux/dma-mapping.h
74787 @@ -54,7 +54,7 @@ struct dma_map_ops {
74788 u64 (*get_required_mask)(struct device *dev);
74789 #endif
74790 int is_phys;
74791 -};
74792 +} __do_const;
74793
74794 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
74795
74796 diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
74797 index 0bc7275..4ccbf11 100644
74798 --- a/include/linux/dmaengine.h
74799 +++ b/include/linux/dmaengine.h
74800 @@ -1078,9 +1078,9 @@ struct dma_pinned_list {
74801 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
74802 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
74803
74804 -dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
74805 +dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
74806 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
74807 -dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
74808 +dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
74809 struct dma_pinned_list *pinned_list, struct page *page,
74810 unsigned int offset, size_t len);
74811
74812 diff --git a/include/linux/efi.h b/include/linux/efi.h
74813 index 094ddd0..f1dfcd3 100644
74814 --- a/include/linux/efi.h
74815 +++ b/include/linux/efi.h
74816 @@ -745,6 +745,7 @@ struct efivar_operations {
74817 efi_set_variable_t *set_variable;
74818 efi_query_variable_store_t *query_variable_store;
74819 };
74820 +typedef struct efivar_operations __no_const efivar_operations_no_const;
74821
74822 struct efivars {
74823 /*
74824 diff --git a/include/linux/elf.h b/include/linux/elf.h
74825 index 40a3c0e0..4c45a38 100644
74826 --- a/include/linux/elf.h
74827 +++ b/include/linux/elf.h
74828 @@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
74829 #define elf_note elf32_note
74830 #define elf_addr_t Elf32_Off
74831 #define Elf_Half Elf32_Half
74832 +#define elf_dyn Elf32_Dyn
74833
74834 #else
74835
74836 @@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
74837 #define elf_note elf64_note
74838 #define elf_addr_t Elf64_Off
74839 #define Elf_Half Elf64_Half
74840 +#define elf_dyn Elf64_Dyn
74841
74842 #endif
74843
74844 diff --git a/include/linux/err.h b/include/linux/err.h
74845 index 15f92e0..e825a8e 100644
74846 --- a/include/linux/err.h
74847 +++ b/include/linux/err.h
74848 @@ -19,12 +19,12 @@
74849
74850 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
74851
74852 -static inline void * __must_check ERR_PTR(long error)
74853 +static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
74854 {
74855 return (void *) error;
74856 }
74857
74858 -static inline long __must_check PTR_ERR(__force const void *ptr)
74859 +static inline long __must_check __intentional_overflow(-1) PTR_ERR(__force const void *ptr)
74860 {
74861 return (long) ptr;
74862 }
74863 diff --git a/include/linux/extcon.h b/include/linux/extcon.h
74864 index fcb51c8..bdafcf6 100644
74865 --- a/include/linux/extcon.h
74866 +++ b/include/linux/extcon.h
74867 @@ -134,7 +134,7 @@ struct extcon_dev {
74868 /* /sys/class/extcon/.../mutually_exclusive/... */
74869 struct attribute_group attr_g_muex;
74870 struct attribute **attrs_muex;
74871 - struct device_attribute *d_attrs_muex;
74872 + device_attribute_no_const *d_attrs_muex;
74873 };
74874
74875 /**
74876 diff --git a/include/linux/fb.h b/include/linux/fb.h
74877 index ffac70a..ca3e711 100644
74878 --- a/include/linux/fb.h
74879 +++ b/include/linux/fb.h
74880 @@ -304,7 +304,7 @@ struct fb_ops {
74881 /* called at KDB enter and leave time to prepare the console */
74882 int (*fb_debug_enter)(struct fb_info *info);
74883 int (*fb_debug_leave)(struct fb_info *info);
74884 -};
74885 +} __do_const;
74886
74887 #ifdef CONFIG_FB_TILEBLITTING
74888 #define FB_TILE_CURSOR_NONE 0
74889 diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
74890 index 085197b..0fa6f0b 100644
74891 --- a/include/linux/fdtable.h
74892 +++ b/include/linux/fdtable.h
74893 @@ -95,7 +95,7 @@ struct files_struct *get_files_struct(struct task_struct *);
74894 void put_files_struct(struct files_struct *fs);
74895 void reset_files_struct(struct files_struct *);
74896 int unshare_files(struct files_struct **);
74897 -struct files_struct *dup_fd(struct files_struct *, int *);
74898 +struct files_struct *dup_fd(struct files_struct *, int *) __latent_entropy;
74899 void do_close_on_exec(struct files_struct *);
74900 int iterate_fd(struct files_struct *, unsigned,
74901 int (*)(const void *, struct file *, unsigned),
74902 diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
74903 index 8293262..2b3b8bd 100644
74904 --- a/include/linux/frontswap.h
74905 +++ b/include/linux/frontswap.h
74906 @@ -11,7 +11,7 @@ struct frontswap_ops {
74907 int (*load)(unsigned, pgoff_t, struct page *);
74908 void (*invalidate_page)(unsigned, pgoff_t);
74909 void (*invalidate_area)(unsigned);
74910 -};
74911 +} __no_const;
74912
74913 extern bool frontswap_enabled;
74914 extern struct frontswap_ops *
74915 diff --git a/include/linux/fs.h b/include/linux/fs.h
74916 index 164d2a9..0ffa41d0 100644
74917 --- a/include/linux/fs.h
74918 +++ b/include/linux/fs.h
74919 @@ -1552,7 +1552,8 @@ struct file_operations {
74920 long (*fallocate)(struct file *file, int mode, loff_t offset,
74921 loff_t len);
74922 int (*show_fdinfo)(struct seq_file *m, struct file *f);
74923 -};
74924 +} __do_const;
74925 +typedef struct file_operations __no_const file_operations_no_const;
74926
74927 struct inode_operations {
74928 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
74929 @@ -2747,4 +2748,14 @@ static inline bool dir_relax(struct inode *inode)
74930 return !IS_DEADDIR(inode);
74931 }
74932
74933 +static inline bool is_sidechannel_device(const struct inode *inode)
74934 +{
74935 +#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
74936 + umode_t mode = inode->i_mode;
74937 + return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
74938 +#else
74939 + return false;
74940 +#endif
74941 +}
74942 +
74943 #endif /* _LINUX_FS_H */
74944 diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
74945 index 0efc3e6..e0e1e5f 100644
74946 --- a/include/linux/fs_struct.h
74947 +++ b/include/linux/fs_struct.h
74948 @@ -6,7 +6,7 @@
74949 #include <linux/seqlock.h>
74950
74951 struct fs_struct {
74952 - int users;
74953 + atomic_t users;
74954 spinlock_t lock;
74955 seqcount_t seq;
74956 int umask;
74957 diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
74958 index 7823e9e..56b6f2f 100644
74959 --- a/include/linux/fscache-cache.h
74960 +++ b/include/linux/fscache-cache.h
74961 @@ -113,7 +113,7 @@ struct fscache_operation {
74962 fscache_operation_release_t release;
74963 };
74964
74965 -extern atomic_t fscache_op_debug_id;
74966 +extern atomic_unchecked_t fscache_op_debug_id;
74967 extern void fscache_op_work_func(struct work_struct *work);
74968
74969 extern void fscache_enqueue_operation(struct fscache_operation *);
74970 @@ -135,7 +135,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
74971 INIT_WORK(&op->work, fscache_op_work_func);
74972 atomic_set(&op->usage, 1);
74973 op->state = FSCACHE_OP_ST_INITIALISED;
74974 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
74975 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
74976 op->processor = processor;
74977 op->release = release;
74978 INIT_LIST_HEAD(&op->pend_link);
74979 diff --git a/include/linux/fscache.h b/include/linux/fscache.h
74980 index 19b4645..3b73dfc 100644
74981 --- a/include/linux/fscache.h
74982 +++ b/include/linux/fscache.h
74983 @@ -152,7 +152,7 @@ struct fscache_cookie_def {
74984 * - this is mandatory for any object that may have data
74985 */
74986 void (*now_uncached)(void *cookie_netfs_data);
74987 -};
74988 +} __do_const;
74989
74990 /*
74991 * fscache cached network filesystem type
74992 diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
74993 index 1c804b0..1432c2b 100644
74994 --- a/include/linux/fsnotify.h
74995 +++ b/include/linux/fsnotify.h
74996 @@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
74997 struct inode *inode = file_inode(file);
74998 __u32 mask = FS_ACCESS;
74999
75000 + if (is_sidechannel_device(inode))
75001 + return;
75002 +
75003 if (S_ISDIR(inode->i_mode))
75004 mask |= FS_ISDIR;
75005
75006 @@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
75007 struct inode *inode = file_inode(file);
75008 __u32 mask = FS_MODIFY;
75009
75010 + if (is_sidechannel_device(inode))
75011 + return;
75012 +
75013 if (S_ISDIR(inode->i_mode))
75014 mask |= FS_ISDIR;
75015
75016 @@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
75017 */
75018 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
75019 {
75020 - return kstrdup(name, GFP_KERNEL);
75021 + return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
75022 }
75023
75024 /*
75025 diff --git a/include/linux/genhd.h b/include/linux/genhd.h
75026 index 9f3c275..8bdff5d 100644
75027 --- a/include/linux/genhd.h
75028 +++ b/include/linux/genhd.h
75029 @@ -194,7 +194,7 @@ struct gendisk {
75030 struct kobject *slave_dir;
75031
75032 struct timer_rand_state *random;
75033 - atomic_t sync_io; /* RAID */
75034 + atomic_unchecked_t sync_io; /* RAID */
75035 struct disk_events *ev;
75036 #ifdef CONFIG_BLK_DEV_INTEGRITY
75037 struct blk_integrity *integrity;
75038 @@ -435,7 +435,7 @@ extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
75039 extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
75040
75041 /* drivers/char/random.c */
75042 -extern void add_disk_randomness(struct gendisk *disk);
75043 +extern void add_disk_randomness(struct gendisk *disk) __latent_entropy;
75044 extern void rand_initialize_disk(struct gendisk *disk);
75045
75046 static inline sector_t get_start_sect(struct block_device *bdev)
75047 diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
75048 index 023bc34..b02b46a 100644
75049 --- a/include/linux/genl_magic_func.h
75050 +++ b/include/linux/genl_magic_func.h
75051 @@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
75052 },
75053
75054 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
75055 -static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
75056 +static struct genl_ops ZZZ_genl_ops[] = {
75057 #include GENL_MAGIC_INCLUDE_FILE
75058 };
75059
75060 diff --git a/include/linux/gfp.h b/include/linux/gfp.h
75061 index 9b4dd49..61fd41d 100644
75062 --- a/include/linux/gfp.h
75063 +++ b/include/linux/gfp.h
75064 @@ -35,6 +35,13 @@ struct vm_area_struct;
75065 #define ___GFP_NO_KSWAPD 0x400000u
75066 #define ___GFP_OTHER_NODE 0x800000u
75067 #define ___GFP_WRITE 0x1000000u
75068 +
75069 +#ifdef CONFIG_PAX_USERCOPY_SLABS
75070 +#define ___GFP_USERCOPY 0x2000000u
75071 +#else
75072 +#define ___GFP_USERCOPY 0
75073 +#endif
75074 +
75075 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
75076
75077 /*
75078 @@ -92,6 +99,7 @@ struct vm_area_struct;
75079 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
75080 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
75081 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
75082 +#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
75083
75084 /*
75085 * This may seem redundant, but it's a way of annotating false positives vs.
75086 @@ -99,7 +107,7 @@ struct vm_area_struct;
75087 */
75088 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
75089
75090 -#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
75091 +#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
75092 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
75093
75094 /* This equals 0, but use constants in case they ever change */
75095 @@ -153,6 +161,8 @@ struct vm_area_struct;
75096 /* 4GB DMA on some platforms */
75097 #define GFP_DMA32 __GFP_DMA32
75098
75099 +#define GFP_USERCOPY __GFP_USERCOPY
75100 +
75101 /* Convert GFP flags to their corresponding migrate type */
75102 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
75103 {
75104 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
75105 new file mode 100644
75106 index 0000000..edb2cb6
75107 --- /dev/null
75108 +++ b/include/linux/gracl.h
75109 @@ -0,0 +1,340 @@
75110 +#ifndef GR_ACL_H
75111 +#define GR_ACL_H
75112 +
75113 +#include <linux/grdefs.h>
75114 +#include <linux/resource.h>
75115 +#include <linux/capability.h>
75116 +#include <linux/dcache.h>
75117 +#include <asm/resource.h>
75118 +
75119 +/* Major status information */
75120 +
75121 +#define GR_VERSION "grsecurity 3.0"
75122 +#define GRSECURITY_VERSION 0x3000
75123 +
75124 +enum {
75125 + GR_SHUTDOWN = 0,
75126 + GR_ENABLE = 1,
75127 + GR_SPROLE = 2,
75128 + GR_OLDRELOAD = 3,
75129 + GR_SEGVMOD = 4,
75130 + GR_STATUS = 5,
75131 + GR_UNSPROLE = 6,
75132 + GR_PASSSET = 7,
75133 + GR_SPROLEPAM = 8,
75134 + GR_RELOAD = 9,
75135 +};
75136 +
75137 +/* Password setup definitions
75138 + * kernel/grhash.c */
75139 +enum {
75140 + GR_PW_LEN = 128,
75141 + GR_SALT_LEN = 16,
75142 + GR_SHA_LEN = 32,
75143 +};
75144 +
75145 +enum {
75146 + GR_SPROLE_LEN = 64,
75147 +};
75148 +
75149 +enum {
75150 + GR_NO_GLOB = 0,
75151 + GR_REG_GLOB,
75152 + GR_CREATE_GLOB
75153 +};
75154 +
75155 +#define GR_NLIMITS 32
75156 +
75157 +/* Begin Data Structures */
75158 +
75159 +struct sprole_pw {
75160 + unsigned char *rolename;
75161 + unsigned char salt[GR_SALT_LEN];
75162 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
75163 +};
75164 +
75165 +struct name_entry {
75166 + __u32 key;
75167 + ino_t inode;
75168 + dev_t device;
75169 + char *name;
75170 + __u16 len;
75171 + __u8 deleted;
75172 + struct name_entry *prev;
75173 + struct name_entry *next;
75174 +};
75175 +
75176 +struct inodev_entry {
75177 + struct name_entry *nentry;
75178 + struct inodev_entry *prev;
75179 + struct inodev_entry *next;
75180 +};
75181 +
75182 +struct acl_role_db {
75183 + struct acl_role_label **r_hash;
75184 + __u32 r_size;
75185 +};
75186 +
75187 +struct inodev_db {
75188 + struct inodev_entry **i_hash;
75189 + __u32 i_size;
75190 +};
75191 +
75192 +struct name_db {
75193 + struct name_entry **n_hash;
75194 + __u32 n_size;
75195 +};
75196 +
75197 +struct crash_uid {
75198 + uid_t uid;
75199 + unsigned long expires;
75200 +};
75201 +
75202 +struct gr_hash_struct {
75203 + void **table;
75204 + void **nametable;
75205 + void *first;
75206 + __u32 table_size;
75207 + __u32 used_size;
75208 + int type;
75209 +};
75210 +
75211 +/* Userspace Grsecurity ACL data structures */
75212 +
75213 +struct acl_subject_label {
75214 + char *filename;
75215 + ino_t inode;
75216 + dev_t device;
75217 + __u32 mode;
75218 + kernel_cap_t cap_mask;
75219 + kernel_cap_t cap_lower;
75220 + kernel_cap_t cap_invert_audit;
75221 +
75222 + struct rlimit res[GR_NLIMITS];
75223 + __u32 resmask;
75224 +
75225 + __u8 user_trans_type;
75226 + __u8 group_trans_type;
75227 + uid_t *user_transitions;
75228 + gid_t *group_transitions;
75229 + __u16 user_trans_num;
75230 + __u16 group_trans_num;
75231 +
75232 + __u32 sock_families[2];
75233 + __u32 ip_proto[8];
75234 + __u32 ip_type;
75235 + struct acl_ip_label **ips;
75236 + __u32 ip_num;
75237 + __u32 inaddr_any_override;
75238 +
75239 + __u32 crashes;
75240 + unsigned long expires;
75241 +
75242 + struct acl_subject_label *parent_subject;
75243 + struct gr_hash_struct *hash;
75244 + struct acl_subject_label *prev;
75245 + struct acl_subject_label *next;
75246 +
75247 + struct acl_object_label **obj_hash;
75248 + __u32 obj_hash_size;
75249 + __u16 pax_flags;
75250 +};
75251 +
75252 +struct role_allowed_ip {
75253 + __u32 addr;
75254 + __u32 netmask;
75255 +
75256 + struct role_allowed_ip *prev;
75257 + struct role_allowed_ip *next;
75258 +};
75259 +
75260 +struct role_transition {
75261 + char *rolename;
75262 +
75263 + struct role_transition *prev;
75264 + struct role_transition *next;
75265 +};
75266 +
75267 +struct acl_role_label {
75268 + char *rolename;
75269 + uid_t uidgid;
75270 + __u16 roletype;
75271 +
75272 + __u16 auth_attempts;
75273 + unsigned long expires;
75274 +
75275 + struct acl_subject_label *root_label;
75276 + struct gr_hash_struct *hash;
75277 +
75278 + struct acl_role_label *prev;
75279 + struct acl_role_label *next;
75280 +
75281 + struct role_transition *transitions;
75282 + struct role_allowed_ip *allowed_ips;
75283 + uid_t *domain_children;
75284 + __u16 domain_child_num;
75285 +
75286 + umode_t umask;
75287 +
75288 + struct acl_subject_label **subj_hash;
75289 + __u32 subj_hash_size;
75290 +};
75291 +
75292 +struct user_acl_role_db {
75293 + struct acl_role_label **r_table;
75294 + __u32 num_pointers; /* Number of allocations to track */
75295 + __u32 num_roles; /* Number of roles */
75296 + __u32 num_domain_children; /* Number of domain children */
75297 + __u32 num_subjects; /* Number of subjects */
75298 + __u32 num_objects; /* Number of objects */
75299 +};
75300 +
75301 +struct acl_object_label {
75302 + char *filename;
75303 + ino_t inode;
75304 + dev_t device;
75305 + __u32 mode;
75306 +
75307 + struct acl_subject_label *nested;
75308 + struct acl_object_label *globbed;
75309 +
75310 + /* next two structures not used */
75311 +
75312 + struct acl_object_label *prev;
75313 + struct acl_object_label *next;
75314 +};
75315 +
75316 +struct acl_ip_label {
75317 + char *iface;
75318 + __u32 addr;
75319 + __u32 netmask;
75320 + __u16 low, high;
75321 + __u8 mode;
75322 + __u32 type;
75323 + __u32 proto[8];
75324 +
75325 + /* next two structures not used */
75326 +
75327 + struct acl_ip_label *prev;
75328 + struct acl_ip_label *next;
75329 +};
75330 +
75331 +struct gr_arg {
75332 + struct user_acl_role_db role_db;
75333 + unsigned char pw[GR_PW_LEN];
75334 + unsigned char salt[GR_SALT_LEN];
75335 + unsigned char sum[GR_SHA_LEN];
75336 + unsigned char sp_role[GR_SPROLE_LEN];
75337 + struct sprole_pw *sprole_pws;
75338 + dev_t segv_device;
75339 + ino_t segv_inode;
75340 + uid_t segv_uid;
75341 + __u16 num_sprole_pws;
75342 + __u16 mode;
75343 +};
75344 +
75345 +struct gr_arg_wrapper {
75346 + struct gr_arg *arg;
75347 + __u32 version;
75348 + __u32 size;
75349 +};
75350 +
75351 +struct subject_map {
75352 + struct acl_subject_label *user;
75353 + struct acl_subject_label *kernel;
75354 + struct subject_map *prev;
75355 + struct subject_map *next;
75356 +};
75357 +
75358 +struct acl_subj_map_db {
75359 + struct subject_map **s_hash;
75360 + __u32 s_size;
75361 +};
75362 +
75363 +struct gr_policy_state {
75364 + struct sprole_pw **acl_special_roles;
75365 + __u16 num_sprole_pws;
75366 + struct acl_role_label *kernel_role;
75367 + struct acl_role_label *role_list;
75368 + struct acl_role_label *default_role;
75369 + struct acl_role_db acl_role_set;
75370 + struct acl_subj_map_db subj_map_set;
75371 + struct name_db name_set;
75372 + struct inodev_db inodev_set;
75373 +};
75374 +
75375 +struct gr_alloc_state {
75376 + unsigned long alloc_stack_next;
75377 + unsigned long alloc_stack_size;
75378 + void **alloc_stack;
75379 +};
75380 +
75381 +struct gr_reload_state {
75382 + struct gr_policy_state oldpolicy;
75383 + struct gr_alloc_state oldalloc;
75384 + struct gr_policy_state newpolicy;
75385 + struct gr_alloc_state newalloc;
75386 + struct gr_policy_state *oldpolicy_ptr;
75387 + struct gr_alloc_state *oldalloc_ptr;
75388 + unsigned char oldmode;
75389 +};
75390 +
75391 +/* End Data Structures Section */
75392 +
75393 +/* Hash functions generated by empirical testing by Brad Spengler
75394 + Makes good use of the low bits of the inode. Generally 0-1 times
75395 + in loop for successful match. 0-3 for unsuccessful match.
75396 + Shift/add algorithm with modulus of table size and an XOR*/
75397 +
75398 +static __inline__ unsigned int
75399 +gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
75400 +{
75401 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
75402 +}
75403 +
75404 + static __inline__ unsigned int
75405 +gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
75406 +{
75407 + return ((const unsigned long)userp % sz);
75408 +}
75409 +
75410 +static __inline__ unsigned int
75411 +gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
75412 +{
75413 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
75414 +}
75415 +
75416 +static __inline__ unsigned int
75417 +gr_nhash(const char *name, const __u16 len, const unsigned int sz)
75418 +{
75419 + return full_name_hash((const unsigned char *)name, len) % sz;
75420 +}
75421 +
75422 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
75423 + subj = NULL; \
75424 + iter = 0; \
75425 + while (iter < role->subj_hash_size) { \
75426 + if (subj == NULL) \
75427 + subj = role->subj_hash[iter]; \
75428 + if (subj == NULL) { \
75429 + iter++; \
75430 + continue; \
75431 + }
75432 +
75433 +#define FOR_EACH_SUBJECT_END(subj,iter) \
75434 + subj = subj->next; \
75435 + if (subj == NULL) \
75436 + iter++; \
75437 + }
75438 +
75439 +
75440 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
75441 + subj = role->hash->first; \
75442 + while (subj != NULL) {
75443 +
75444 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
75445 + subj = subj->next; \
75446 + }
75447 +
75448 +#endif
75449 +
75450 diff --git a/include/linux/gracl_compat.h b/include/linux/gracl_compat.h
75451 new file mode 100644
75452 index 0000000..33ebd1f
75453 --- /dev/null
75454 +++ b/include/linux/gracl_compat.h
75455 @@ -0,0 +1,156 @@
75456 +#ifndef GR_ACL_COMPAT_H
75457 +#define GR_ACL_COMPAT_H
75458 +
75459 +#include <linux/resource.h>
75460 +#include <asm/resource.h>
75461 +
75462 +struct sprole_pw_compat {
75463 + compat_uptr_t rolename;
75464 + unsigned char salt[GR_SALT_LEN];
75465 + unsigned char sum[GR_SHA_LEN];
75466 +};
75467 +
75468 +struct gr_hash_struct_compat {
75469 + compat_uptr_t table;
75470 + compat_uptr_t nametable;
75471 + compat_uptr_t first;
75472 + __u32 table_size;
75473 + __u32 used_size;
75474 + int type;
75475 +};
75476 +
75477 +struct acl_subject_label_compat {
75478 + compat_uptr_t filename;
75479 + compat_ino_t inode;
75480 + __u32 device;
75481 + __u32 mode;
75482 + kernel_cap_t cap_mask;
75483 + kernel_cap_t cap_lower;
75484 + kernel_cap_t cap_invert_audit;
75485 +
75486 + struct compat_rlimit res[GR_NLIMITS];
75487 + __u32 resmask;
75488 +
75489 + __u8 user_trans_type;
75490 + __u8 group_trans_type;
75491 + compat_uptr_t user_transitions;
75492 + compat_uptr_t group_transitions;
75493 + __u16 user_trans_num;
75494 + __u16 group_trans_num;
75495 +
75496 + __u32 sock_families[2];
75497 + __u32 ip_proto[8];
75498 + __u32 ip_type;
75499 + compat_uptr_t ips;
75500 + __u32 ip_num;
75501 + __u32 inaddr_any_override;
75502 +
75503 + __u32 crashes;
75504 + compat_ulong_t expires;
75505 +
75506 + compat_uptr_t parent_subject;
75507 + compat_uptr_t hash;
75508 + compat_uptr_t prev;
75509 + compat_uptr_t next;
75510 +
75511 + compat_uptr_t obj_hash;
75512 + __u32 obj_hash_size;
75513 + __u16 pax_flags;
75514 +};
75515 +
75516 +struct role_allowed_ip_compat {
75517 + __u32 addr;
75518 + __u32 netmask;
75519 +
75520 + compat_uptr_t prev;
75521 + compat_uptr_t next;
75522 +};
75523 +
75524 +struct role_transition_compat {
75525 + compat_uptr_t rolename;
75526 +
75527 + compat_uptr_t prev;
75528 + compat_uptr_t next;
75529 +};
75530 +
75531 +struct acl_role_label_compat {
75532 + compat_uptr_t rolename;
75533 + uid_t uidgid;
75534 + __u16 roletype;
75535 +
75536 + __u16 auth_attempts;
75537 + compat_ulong_t expires;
75538 +
75539 + compat_uptr_t root_label;
75540 + compat_uptr_t hash;
75541 +
75542 + compat_uptr_t prev;
75543 + compat_uptr_t next;
75544 +
75545 + compat_uptr_t transitions;
75546 + compat_uptr_t allowed_ips;
75547 + compat_uptr_t domain_children;
75548 + __u16 domain_child_num;
75549 +
75550 + umode_t umask;
75551 +
75552 + compat_uptr_t subj_hash;
75553 + __u32 subj_hash_size;
75554 +};
75555 +
75556 +struct user_acl_role_db_compat {
75557 + compat_uptr_t r_table;
75558 + __u32 num_pointers;
75559 + __u32 num_roles;
75560 + __u32 num_domain_children;
75561 + __u32 num_subjects;
75562 + __u32 num_objects;
75563 +};
75564 +
75565 +struct acl_object_label_compat {
75566 + compat_uptr_t filename;
75567 + compat_ino_t inode;
75568 + __u32 device;
75569 + __u32 mode;
75570 +
75571 + compat_uptr_t nested;
75572 + compat_uptr_t globbed;
75573 +
75574 + compat_uptr_t prev;
75575 + compat_uptr_t next;
75576 +};
75577 +
75578 +struct acl_ip_label_compat {
75579 + compat_uptr_t iface;
75580 + __u32 addr;
75581 + __u32 netmask;
75582 + __u16 low, high;
75583 + __u8 mode;
75584 + __u32 type;
75585 + __u32 proto[8];
75586 +
75587 + compat_uptr_t prev;
75588 + compat_uptr_t next;
75589 +};
75590 +
75591 +struct gr_arg_compat {
75592 + struct user_acl_role_db_compat role_db;
75593 + unsigned char pw[GR_PW_LEN];
75594 + unsigned char salt[GR_SALT_LEN];
75595 + unsigned char sum[GR_SHA_LEN];
75596 + unsigned char sp_role[GR_SPROLE_LEN];
75597 + compat_uptr_t sprole_pws;
75598 + __u32 segv_device;
75599 + compat_ino_t segv_inode;
75600 + uid_t segv_uid;
75601 + __u16 num_sprole_pws;
75602 + __u16 mode;
75603 +};
75604 +
75605 +struct gr_arg_wrapper_compat {
75606 + compat_uptr_t arg;
75607 + __u32 version;
75608 + __u32 size;
75609 +};
75610 +
75611 +#endif
75612 diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
75613 new file mode 100644
75614 index 0000000..323ecf2
75615 --- /dev/null
75616 +++ b/include/linux/gralloc.h
75617 @@ -0,0 +1,9 @@
75618 +#ifndef __GRALLOC_H
75619 +#define __GRALLOC_H
75620 +
75621 +void acl_free_all(void);
75622 +int acl_alloc_stack_init(unsigned long size);
75623 +void *acl_alloc(unsigned long len);
75624 +void *acl_alloc_num(unsigned long num, unsigned long len);
75625 +
75626 +#endif
75627 diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
75628 new file mode 100644
75629 index 0000000..be66033
75630 --- /dev/null
75631 +++ b/include/linux/grdefs.h
75632 @@ -0,0 +1,140 @@
75633 +#ifndef GRDEFS_H
75634 +#define GRDEFS_H
75635 +
75636 +/* Begin grsecurity status declarations */
75637 +
75638 +enum {
75639 + GR_READY = 0x01,
75640 + GR_STATUS_INIT = 0x00 // disabled state
75641 +};
75642 +
75643 +/* Begin ACL declarations */
75644 +
75645 +/* Role flags */
75646 +
75647 +enum {
75648 + GR_ROLE_USER = 0x0001,
75649 + GR_ROLE_GROUP = 0x0002,
75650 + GR_ROLE_DEFAULT = 0x0004,
75651 + GR_ROLE_SPECIAL = 0x0008,
75652 + GR_ROLE_AUTH = 0x0010,
75653 + GR_ROLE_NOPW = 0x0020,
75654 + GR_ROLE_GOD = 0x0040,
75655 + GR_ROLE_LEARN = 0x0080,
75656 + GR_ROLE_TPE = 0x0100,
75657 + GR_ROLE_DOMAIN = 0x0200,
75658 + GR_ROLE_PAM = 0x0400,
75659 + GR_ROLE_PERSIST = 0x0800
75660 +};
75661 +
75662 +/* ACL Subject and Object mode flags */
75663 +enum {
75664 + GR_DELETED = 0x80000000
75665 +};
75666 +
75667 +/* ACL Object-only mode flags */
75668 +enum {
75669 + GR_READ = 0x00000001,
75670 + GR_APPEND = 0x00000002,
75671 + GR_WRITE = 0x00000004,
75672 + GR_EXEC = 0x00000008,
75673 + GR_FIND = 0x00000010,
75674 + GR_INHERIT = 0x00000020,
75675 + GR_SETID = 0x00000040,
75676 + GR_CREATE = 0x00000080,
75677 + GR_DELETE = 0x00000100,
75678 + GR_LINK = 0x00000200,
75679 + GR_AUDIT_READ = 0x00000400,
75680 + GR_AUDIT_APPEND = 0x00000800,
75681 + GR_AUDIT_WRITE = 0x00001000,
75682 + GR_AUDIT_EXEC = 0x00002000,
75683 + GR_AUDIT_FIND = 0x00004000,
75684 + GR_AUDIT_INHERIT= 0x00008000,
75685 + GR_AUDIT_SETID = 0x00010000,
75686 + GR_AUDIT_CREATE = 0x00020000,
75687 + GR_AUDIT_DELETE = 0x00040000,
75688 + GR_AUDIT_LINK = 0x00080000,
75689 + GR_PTRACERD = 0x00100000,
75690 + GR_NOPTRACE = 0x00200000,
75691 + GR_SUPPRESS = 0x00400000,
75692 + GR_NOLEARN = 0x00800000,
75693 + GR_INIT_TRANSFER= 0x01000000
75694 +};
75695 +
75696 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
75697 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
75698 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
75699 +
75700 +/* ACL subject-only mode flags */
75701 +enum {
75702 + GR_KILL = 0x00000001,
75703 + GR_VIEW = 0x00000002,
75704 + GR_PROTECTED = 0x00000004,
75705 + GR_LEARN = 0x00000008,
75706 + GR_OVERRIDE = 0x00000010,
75707 + /* just a placeholder, this mode is only used in userspace */
75708 + GR_DUMMY = 0x00000020,
75709 + GR_PROTSHM = 0x00000040,
75710 + GR_KILLPROC = 0x00000080,
75711 + GR_KILLIPPROC = 0x00000100,
75712 + /* just a placeholder, this mode is only used in userspace */
75713 + GR_NOTROJAN = 0x00000200,
75714 + GR_PROTPROCFD = 0x00000400,
75715 + GR_PROCACCT = 0x00000800,
75716 + GR_RELAXPTRACE = 0x00001000,
75717 + //GR_NESTED = 0x00002000,
75718 + GR_INHERITLEARN = 0x00004000,
75719 + GR_PROCFIND = 0x00008000,
75720 + GR_POVERRIDE = 0x00010000,
75721 + GR_KERNELAUTH = 0x00020000,
75722 + GR_ATSECURE = 0x00040000,
75723 + GR_SHMEXEC = 0x00080000
75724 +};
75725 +
75726 +enum {
75727 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
75728 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
75729 + GR_PAX_ENABLE_MPROTECT = 0x0004,
75730 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
75731 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
75732 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
75733 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
75734 + GR_PAX_DISABLE_MPROTECT = 0x0400,
75735 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
75736 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
75737 +};
75738 +
75739 +enum {
75740 + GR_ID_USER = 0x01,
75741 + GR_ID_GROUP = 0x02,
75742 +};
75743 +
75744 +enum {
75745 + GR_ID_ALLOW = 0x01,
75746 + GR_ID_DENY = 0x02,
75747 +};
75748 +
75749 +#define GR_CRASH_RES 31
75750 +#define GR_UIDTABLE_MAX 500
75751 +
75752 +/* begin resource learning section */
75753 +enum {
75754 + GR_RLIM_CPU_BUMP = 60,
75755 + GR_RLIM_FSIZE_BUMP = 50000,
75756 + GR_RLIM_DATA_BUMP = 10000,
75757 + GR_RLIM_STACK_BUMP = 1000,
75758 + GR_RLIM_CORE_BUMP = 10000,
75759 + GR_RLIM_RSS_BUMP = 500000,
75760 + GR_RLIM_NPROC_BUMP = 1,
75761 + GR_RLIM_NOFILE_BUMP = 5,
75762 + GR_RLIM_MEMLOCK_BUMP = 50000,
75763 + GR_RLIM_AS_BUMP = 500000,
75764 + GR_RLIM_LOCKS_BUMP = 2,
75765 + GR_RLIM_SIGPENDING_BUMP = 5,
75766 + GR_RLIM_MSGQUEUE_BUMP = 10000,
75767 + GR_RLIM_NICE_BUMP = 1,
75768 + GR_RLIM_RTPRIO_BUMP = 1,
75769 + GR_RLIM_RTTIME_BUMP = 1000000
75770 +};
75771 +
75772 +#endif
75773 diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
75774 new file mode 100644
75775 index 0000000..d25522e
75776 --- /dev/null
75777 +++ b/include/linux/grinternal.h
75778 @@ -0,0 +1,229 @@
75779 +#ifndef __GRINTERNAL_H
75780 +#define __GRINTERNAL_H
75781 +
75782 +#ifdef CONFIG_GRKERNSEC
75783 +
75784 +#include <linux/fs.h>
75785 +#include <linux/mnt_namespace.h>
75786 +#include <linux/nsproxy.h>
75787 +#include <linux/gracl.h>
75788 +#include <linux/grdefs.h>
75789 +#include <linux/grmsg.h>
75790 +
75791 +void gr_add_learn_entry(const char *fmt, ...)
75792 + __attribute__ ((format (printf, 1, 2)));
75793 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
75794 + const struct vfsmount *mnt);
75795 +__u32 gr_check_create(const struct dentry *new_dentry,
75796 + const struct dentry *parent,
75797 + const struct vfsmount *mnt, const __u32 mode);
75798 +int gr_check_protected_task(const struct task_struct *task);
75799 +__u32 to_gr_audit(const __u32 reqmode);
75800 +int gr_set_acls(const int type);
75801 +int gr_acl_is_enabled(void);
75802 +char gr_roletype_to_char(void);
75803 +
75804 +void gr_handle_alertkill(struct task_struct *task);
75805 +char *gr_to_filename(const struct dentry *dentry,
75806 + const struct vfsmount *mnt);
75807 +char *gr_to_filename1(const struct dentry *dentry,
75808 + const struct vfsmount *mnt);
75809 +char *gr_to_filename2(const struct dentry *dentry,
75810 + const struct vfsmount *mnt);
75811 +char *gr_to_filename3(const struct dentry *dentry,
75812 + const struct vfsmount *mnt);
75813 +
75814 +extern int grsec_enable_ptrace_readexec;
75815 +extern int grsec_enable_harden_ptrace;
75816 +extern int grsec_enable_link;
75817 +extern int grsec_enable_fifo;
75818 +extern int grsec_enable_execve;
75819 +extern int grsec_enable_shm;
75820 +extern int grsec_enable_execlog;
75821 +extern int grsec_enable_signal;
75822 +extern int grsec_enable_audit_ptrace;
75823 +extern int grsec_enable_forkfail;
75824 +extern int grsec_enable_time;
75825 +extern int grsec_enable_rofs;
75826 +extern int grsec_deny_new_usb;
75827 +extern int grsec_enable_chroot_shmat;
75828 +extern int grsec_enable_chroot_mount;
75829 +extern int grsec_enable_chroot_double;
75830 +extern int grsec_enable_chroot_pivot;
75831 +extern int grsec_enable_chroot_chdir;
75832 +extern int grsec_enable_chroot_chmod;
75833 +extern int grsec_enable_chroot_mknod;
75834 +extern int grsec_enable_chroot_fchdir;
75835 +extern int grsec_enable_chroot_nice;
75836 +extern int grsec_enable_chroot_execlog;
75837 +extern int grsec_enable_chroot_caps;
75838 +extern int grsec_enable_chroot_sysctl;
75839 +extern int grsec_enable_chroot_unix;
75840 +extern int grsec_enable_symlinkown;
75841 +extern kgid_t grsec_symlinkown_gid;
75842 +extern int grsec_enable_tpe;
75843 +extern kgid_t grsec_tpe_gid;
75844 +extern int grsec_enable_tpe_all;
75845 +extern int grsec_enable_tpe_invert;
75846 +extern int grsec_enable_socket_all;
75847 +extern kgid_t grsec_socket_all_gid;
75848 +extern int grsec_enable_socket_client;
75849 +extern kgid_t grsec_socket_client_gid;
75850 +extern int grsec_enable_socket_server;
75851 +extern kgid_t grsec_socket_server_gid;
75852 +extern kgid_t grsec_audit_gid;
75853 +extern int grsec_enable_group;
75854 +extern int grsec_enable_log_rwxmaps;
75855 +extern int grsec_enable_mount;
75856 +extern int grsec_enable_chdir;
75857 +extern int grsec_resource_logging;
75858 +extern int grsec_enable_blackhole;
75859 +extern int grsec_lastack_retries;
75860 +extern int grsec_enable_brute;
75861 +extern int grsec_enable_harden_ipc;
75862 +extern int grsec_lock;
75863 +
75864 +extern spinlock_t grsec_alert_lock;
75865 +extern unsigned long grsec_alert_wtime;
75866 +extern unsigned long grsec_alert_fyet;
75867 +
75868 +extern spinlock_t grsec_audit_lock;
75869 +
75870 +extern rwlock_t grsec_exec_file_lock;
75871 +
75872 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
75873 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
75874 + (tsk)->exec_file->f_path.mnt) : "/")
75875 +
75876 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
75877 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
75878 + (tsk)->real_parent->exec_file->f_path.mnt) : "/")
75879 +
75880 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
75881 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
75882 + (tsk)->exec_file->f_path.mnt) : "/")
75883 +
75884 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
75885 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
75886 + (tsk)->real_parent->exec_file->f_path.mnt) : "/")
75887 +
75888 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
75889 +
75890 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
75891 +
75892 +static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
75893 +{
75894 + if (file1 && file2) {
75895 + const struct inode *inode1 = file1->f_path.dentry->d_inode;
75896 + const struct inode *inode2 = file2->f_path.dentry->d_inode;
75897 + if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
75898 + return true;
75899 + }
75900 +
75901 + return false;
75902 +}
75903 +
75904 +#define GR_CHROOT_CAPS {{ \
75905 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
75906 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
75907 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
75908 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
75909 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
75910 + CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
75911 + CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
75912 +
75913 +#define security_learn(normal_msg,args...) \
75914 +({ \
75915 + read_lock(&grsec_exec_file_lock); \
75916 + gr_add_learn_entry(normal_msg "\n", ## args); \
75917 + read_unlock(&grsec_exec_file_lock); \
75918 +})
75919 +
75920 +enum {
75921 + GR_DO_AUDIT,
75922 + GR_DONT_AUDIT,
75923 + /* used for non-audit messages that we shouldn't kill the task on */
75924 + GR_DONT_AUDIT_GOOD
75925 +};
75926 +
75927 +enum {
75928 + GR_TTYSNIFF,
75929 + GR_RBAC,
75930 + GR_RBAC_STR,
75931 + GR_STR_RBAC,
75932 + GR_RBAC_MODE2,
75933 + GR_RBAC_MODE3,
75934 + GR_FILENAME,
75935 + GR_SYSCTL_HIDDEN,
75936 + GR_NOARGS,
75937 + GR_ONE_INT,
75938 + GR_ONE_INT_TWO_STR,
75939 + GR_ONE_STR,
75940 + GR_STR_INT,
75941 + GR_TWO_STR_INT,
75942 + GR_TWO_INT,
75943 + GR_TWO_U64,
75944 + GR_THREE_INT,
75945 + GR_FIVE_INT_TWO_STR,
75946 + GR_TWO_STR,
75947 + GR_THREE_STR,
75948 + GR_FOUR_STR,
75949 + GR_STR_FILENAME,
75950 + GR_FILENAME_STR,
75951 + GR_FILENAME_TWO_INT,
75952 + GR_FILENAME_TWO_INT_STR,
75953 + GR_TEXTREL,
75954 + GR_PTRACE,
75955 + GR_RESOURCE,
75956 + GR_CAP,
75957 + GR_SIG,
75958 + GR_SIG2,
75959 + GR_CRASH1,
75960 + GR_CRASH2,
75961 + GR_PSACCT,
75962 + GR_RWXMAP,
75963 + GR_RWXMAPVMA
75964 +};
75965 +
75966 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
75967 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
75968 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
75969 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
75970 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
75971 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
75972 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
75973 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
75974 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
75975 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
75976 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
75977 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
75978 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
75979 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
75980 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
75981 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
75982 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
75983 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
75984 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
75985 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
75986 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
75987 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
75988 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
75989 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
75990 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
75991 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
75992 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
75993 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
75994 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
75995 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
75996 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
75997 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
75998 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
75999 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
76000 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
76001 +#define gr_log_rwxmap_vma(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAPVMA, str)
76002 +
76003 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
76004 +
76005 +#endif
76006 +
76007 +#endif
76008 diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
76009 new file mode 100644
76010 index 0000000..2b07594
76011 --- /dev/null
76012 +++ b/include/linux/grmsg.h
76013 @@ -0,0 +1,115 @@
76014 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
76015 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
76016 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
76017 +#define GR_STOPMOD_MSG "denied modification of module state by "
76018 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
76019 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
76020 +#define GR_IOPERM_MSG "denied use of ioperm() by "
76021 +#define GR_IOPL_MSG "denied use of iopl() by "
76022 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
76023 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
76024 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
76025 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
76026 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
76027 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
76028 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
76029 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
76030 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
76031 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
76032 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
76033 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
76034 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
76035 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
76036 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
76037 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
76038 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
76039 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
76040 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
76041 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
76042 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
76043 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
76044 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
76045 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
76046 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
76047 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
76048 +#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
76049 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
76050 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
76051 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
76052 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
76053 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
76054 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
76055 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
76056 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
76057 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
76058 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
76059 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
76060 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
76061 +#define GR_SETXATTR_ACL_MSG "%s setting extended attribute of %.950s by "
76062 +#define GR_REMOVEXATTR_ACL_MSG "%s removing extended attribute of %.950s by "
76063 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
76064 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
76065 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
76066 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
76067 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
76068 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
76069 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
76070 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
76071 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
76072 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
76073 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
76074 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
76075 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
76076 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
76077 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
76078 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
76079 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
76080 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
76081 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
76082 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
76083 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
76084 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
76085 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
76086 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
76087 +#define GR_NICE_CHROOT_MSG "denied priority change by "
76088 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
76089 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
76090 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
76091 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
76092 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
76093 +#define GR_TIME_MSG "time set by "
76094 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
76095 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
76096 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
76097 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
76098 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
76099 +#define GR_BIND_MSG "denied bind() by "
76100 +#define GR_CONNECT_MSG "denied connect() by "
76101 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
76102 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
76103 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
76104 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
76105 +#define GR_CAP_ACL_MSG "use of %s denied for "
76106 +#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
76107 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
76108 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
76109 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
76110 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
76111 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
76112 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
76113 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
76114 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
76115 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
76116 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
76117 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
76118 +#define GR_TEXTREL_AUDIT_MSG "denied text relocation in %.950s, VMA:0x%08lx 0x%08lx by "
76119 +#define GR_PTGNUSTACK_MSG "denied marking stack executable as requested by PT_GNU_STACK marking in %.950s by "
76120 +#define GR_VM86_MSG "denied use of vm86 by "
76121 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
76122 +#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
76123 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
76124 +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
76125 +#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
76126 +#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
76127 +#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
76128 +#define GR_IPC_DENIED_MSG "denied %s of globally-%sable IPC with creator uid %u by "
76129 diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
76130 new file mode 100644
76131 index 0000000..d8b5b48
76132 --- /dev/null
76133 +++ b/include/linux/grsecurity.h
76134 @@ -0,0 +1,245 @@
76135 +#ifndef GR_SECURITY_H
76136 +#define GR_SECURITY_H
76137 +#include <linux/fs.h>
76138 +#include <linux/fs_struct.h>
76139 +#include <linux/binfmts.h>
76140 +#include <linux/gracl.h>
76141 +
76142 +/* notify of brain-dead configs */
76143 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
76144 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
76145 +#endif
76146 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
76147 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
76148 +#endif
76149 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
76150 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
76151 +#endif
76152 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
76153 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
76154 +#endif
76155 +
76156 +int gr_handle_new_usb(void);
76157 +
76158 +void gr_handle_brute_attach(int dumpable);
76159 +void gr_handle_brute_check(void);
76160 +void gr_handle_kernel_exploit(void);
76161 +
76162 +char gr_roletype_to_char(void);
76163 +
76164 +int gr_acl_enable_at_secure(void);
76165 +
76166 +int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
76167 +int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
76168 +
76169 +void gr_del_task_from_ip_table(struct task_struct *p);
76170 +
76171 +int gr_pid_is_chrooted(struct task_struct *p);
76172 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
76173 +int gr_handle_chroot_nice(void);
76174 +int gr_handle_chroot_sysctl(const int op);
76175 +int gr_handle_chroot_setpriority(struct task_struct *p,
76176 + const int niceval);
76177 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
76178 +int gr_handle_chroot_chroot(const struct dentry *dentry,
76179 + const struct vfsmount *mnt);
76180 +void gr_handle_chroot_chdir(const struct path *path);
76181 +int gr_handle_chroot_chmod(const struct dentry *dentry,
76182 + const struct vfsmount *mnt, const int mode);
76183 +int gr_handle_chroot_mknod(const struct dentry *dentry,
76184 + const struct vfsmount *mnt, const int mode);
76185 +int gr_handle_chroot_mount(const struct dentry *dentry,
76186 + const struct vfsmount *mnt,
76187 + const char *dev_name);
76188 +int gr_handle_chroot_pivot(void);
76189 +int gr_handle_chroot_unix(const pid_t pid);
76190 +
76191 +int gr_handle_rawio(const struct inode *inode);
76192 +
76193 +void gr_handle_ioperm(void);
76194 +void gr_handle_iopl(void);
76195 +
76196 +umode_t gr_acl_umask(void);
76197 +
76198 +int gr_tpe_allow(const struct file *file);
76199 +
76200 +void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
76201 +void gr_clear_chroot_entries(struct task_struct *task);
76202 +
76203 +void gr_log_forkfail(const int retval);
76204 +void gr_log_timechange(void);
76205 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
76206 +void gr_log_chdir(const struct dentry *dentry,
76207 + const struct vfsmount *mnt);
76208 +void gr_log_chroot_exec(const struct dentry *dentry,
76209 + const struct vfsmount *mnt);
76210 +void gr_log_remount(const char *devname, const int retval);
76211 +void gr_log_unmount(const char *devname, const int retval);
76212 +void gr_log_mount(const char *from, const char *to, const int retval);
76213 +void gr_log_textrel(struct vm_area_struct *vma);
76214 +void gr_log_ptgnustack(struct file *file);
76215 +void gr_log_rwxmmap(struct file *file);
76216 +void gr_log_rwxmprotect(struct vm_area_struct *vma);
76217 +
76218 +int gr_handle_follow_link(const struct inode *parent,
76219 + const struct inode *inode,
76220 + const struct dentry *dentry,
76221 + const struct vfsmount *mnt);
76222 +int gr_handle_fifo(const struct dentry *dentry,
76223 + const struct vfsmount *mnt,
76224 + const struct dentry *dir, const int flag,
76225 + const int acc_mode);
76226 +int gr_handle_hardlink(const struct dentry *dentry,
76227 + const struct vfsmount *mnt,
76228 + struct inode *inode,
76229 + const int mode, const struct filename *to);
76230 +
76231 +int gr_is_capable(const int cap);
76232 +int gr_is_capable_nolog(const int cap);
76233 +int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
76234 +int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
76235 +
76236 +void gr_copy_label(struct task_struct *tsk);
76237 +void gr_handle_crash(struct task_struct *task, const int sig);
76238 +int gr_handle_signal(const struct task_struct *p, const int sig);
76239 +int gr_check_crash_uid(const kuid_t uid);
76240 +int gr_check_protected_task(const struct task_struct *task);
76241 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
76242 +int gr_acl_handle_mmap(const struct file *file,
76243 + const unsigned long prot);
76244 +int gr_acl_handle_mprotect(const struct file *file,
76245 + const unsigned long prot);
76246 +int gr_check_hidden_task(const struct task_struct *tsk);
76247 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
76248 + const struct vfsmount *mnt);
76249 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
76250 + const struct vfsmount *mnt);
76251 +__u32 gr_acl_handle_access(const struct dentry *dentry,
76252 + const struct vfsmount *mnt, const int fmode);
76253 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
76254 + const struct vfsmount *mnt, umode_t *mode);
76255 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
76256 + const struct vfsmount *mnt);
76257 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
76258 + const struct vfsmount *mnt);
76259 +__u32 gr_acl_handle_removexattr(const struct dentry *dentry,
76260 + const struct vfsmount *mnt);
76261 +int gr_handle_ptrace(struct task_struct *task, const long request);
76262 +int gr_handle_proc_ptrace(struct task_struct *task);
76263 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
76264 + const struct vfsmount *mnt);
76265 +int gr_check_crash_exec(const struct file *filp);
76266 +int gr_acl_is_enabled(void);
76267 +void gr_set_role_label(struct task_struct *task, const kuid_t uid,
76268 + const kgid_t gid);
76269 +int gr_set_proc_label(const struct dentry *dentry,
76270 + const struct vfsmount *mnt,
76271 + const int unsafe_flags);
76272 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
76273 + const struct vfsmount *mnt);
76274 +__u32 gr_acl_handle_open(const struct dentry *dentry,
76275 + const struct vfsmount *mnt, int acc_mode);
76276 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
76277 + const struct dentry *p_dentry,
76278 + const struct vfsmount *p_mnt,
76279 + int open_flags, int acc_mode, const int imode);
76280 +void gr_handle_create(const struct dentry *dentry,
76281 + const struct vfsmount *mnt);
76282 +void gr_handle_proc_create(const struct dentry *dentry,
76283 + const struct inode *inode);
76284 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
76285 + const struct dentry *parent_dentry,
76286 + const struct vfsmount *parent_mnt,
76287 + const int mode);
76288 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
76289 + const struct dentry *parent_dentry,
76290 + const struct vfsmount *parent_mnt);
76291 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
76292 + const struct vfsmount *mnt);
76293 +void gr_handle_delete(const ino_t ino, const dev_t dev);
76294 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
76295 + const struct vfsmount *mnt);
76296 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
76297 + const struct dentry *parent_dentry,
76298 + const struct vfsmount *parent_mnt,
76299 + const struct filename *from);
76300 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
76301 + const struct dentry *parent_dentry,
76302 + const struct vfsmount *parent_mnt,
76303 + const struct dentry *old_dentry,
76304 + const struct vfsmount *old_mnt, const struct filename *to);
76305 +int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
76306 +int gr_acl_handle_rename(struct dentry *new_dentry,
76307 + struct dentry *parent_dentry,
76308 + const struct vfsmount *parent_mnt,
76309 + struct dentry *old_dentry,
76310 + struct inode *old_parent_inode,
76311 + struct vfsmount *old_mnt, const struct filename *newname);
76312 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
76313 + struct dentry *old_dentry,
76314 + struct dentry *new_dentry,
76315 + struct vfsmount *mnt, const __u8 replace);
76316 +__u32 gr_check_link(const struct dentry *new_dentry,
76317 + const struct dentry *parent_dentry,
76318 + const struct vfsmount *parent_mnt,
76319 + const struct dentry *old_dentry,
76320 + const struct vfsmount *old_mnt);
76321 +int gr_acl_handle_filldir(const struct file *file, const char *name,
76322 + const unsigned int namelen, const ino_t ino);
76323 +
76324 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
76325 + const struct vfsmount *mnt);
76326 +void gr_acl_handle_exit(void);
76327 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
76328 +int gr_acl_handle_procpidmem(const struct task_struct *task);
76329 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
76330 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
76331 +void gr_audit_ptrace(struct task_struct *task);
76332 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
76333 +void gr_put_exec_file(struct task_struct *task);
76334 +
76335 +int gr_ptrace_readexec(struct file *file, int unsafe_flags);
76336 +
76337 +#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
76338 +extern void gr_learn_resource(const struct task_struct *task, const int res,
76339 + const unsigned long wanted, const int gt);
76340 +#else
76341 +static inline void gr_learn_resource(const struct task_struct *task, const int res,
76342 + const unsigned long wanted, const int gt)
76343 +{
76344 +}
76345 +#endif
76346 +
76347 +#ifdef CONFIG_GRKERNSEC_RESLOG
76348 +extern void gr_log_resource(const struct task_struct *task, const int res,
76349 + const unsigned long wanted, const int gt);
76350 +#else
76351 +static inline void gr_log_resource(const struct task_struct *task, const int res,
76352 + const unsigned long wanted, const int gt)
76353 +{
76354 +}
76355 +#endif
76356 +
76357 +#ifdef CONFIG_GRKERNSEC
76358 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
76359 +void gr_handle_vm86(void);
76360 +void gr_handle_mem_readwrite(u64 from, u64 to);
76361 +
76362 +void gr_log_badprocpid(const char *entry);
76363 +
76364 +extern int grsec_enable_dmesg;
76365 +extern int grsec_disable_privio;
76366 +
76367 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
76368 +extern kgid_t grsec_proc_gid;
76369 +#endif
76370 +
76371 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76372 +extern int grsec_enable_chroot_findtask;
76373 +#endif
76374 +#ifdef CONFIG_GRKERNSEC_SETXID
76375 +extern int grsec_enable_setxid;
76376 +#endif
76377 +#endif
76378 +
76379 +#endif
76380 diff --git a/include/linux/grsock.h b/include/linux/grsock.h
76381 new file mode 100644
76382 index 0000000..e7ffaaf
76383 --- /dev/null
76384 +++ b/include/linux/grsock.h
76385 @@ -0,0 +1,19 @@
76386 +#ifndef __GRSOCK_H
76387 +#define __GRSOCK_H
76388 +
76389 +extern void gr_attach_curr_ip(const struct sock *sk);
76390 +extern int gr_handle_sock_all(const int family, const int type,
76391 + const int protocol);
76392 +extern int gr_handle_sock_server(const struct sockaddr *sck);
76393 +extern int gr_handle_sock_server_other(const struct sock *sck);
76394 +extern int gr_handle_sock_client(const struct sockaddr *sck);
76395 +extern int gr_search_connect(struct socket * sock,
76396 + struct sockaddr_in * addr);
76397 +extern int gr_search_bind(struct socket * sock,
76398 + struct sockaddr_in * addr);
76399 +extern int gr_search_listen(struct socket * sock);
76400 +extern int gr_search_accept(struct socket * sock);
76401 +extern int gr_search_socket(const int domain, const int type,
76402 + const int protocol);
76403 +
76404 +#endif
76405 diff --git a/include/linux/highmem.h b/include/linux/highmem.h
76406 index 7fb31da..08b5114 100644
76407 --- a/include/linux/highmem.h
76408 +++ b/include/linux/highmem.h
76409 @@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
76410 kunmap_atomic(kaddr);
76411 }
76412
76413 +static inline void sanitize_highpage(struct page *page)
76414 +{
76415 + void *kaddr;
76416 + unsigned long flags;
76417 +
76418 + local_irq_save(flags);
76419 + kaddr = kmap_atomic(page);
76420 + clear_page(kaddr);
76421 + kunmap_atomic(kaddr);
76422 + local_irq_restore(flags);
76423 +}
76424 +
76425 static inline void zero_user_segments(struct page *page,
76426 unsigned start1, unsigned end1,
76427 unsigned start2, unsigned end2)
76428 diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
76429 index 1c7b89a..7dda400 100644
76430 --- a/include/linux/hwmon-sysfs.h
76431 +++ b/include/linux/hwmon-sysfs.h
76432 @@ -25,7 +25,8 @@
76433 struct sensor_device_attribute{
76434 struct device_attribute dev_attr;
76435 int index;
76436 -};
76437 +} __do_const;
76438 +typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
76439 #define to_sensor_dev_attr(_dev_attr) \
76440 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
76441
76442 @@ -41,7 +42,8 @@ struct sensor_device_attribute_2 {
76443 struct device_attribute dev_attr;
76444 u8 index;
76445 u8 nr;
76446 -};
76447 +} __do_const;
76448 +typedef struct sensor_device_attribute_2 __no_const sensor_device_attribute_2_no_const;
76449 #define to_sensor_dev_attr_2(_dev_attr) \
76450 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
76451
76452 diff --git a/include/linux/i2c.h b/include/linux/i2c.h
76453 index 2ab11dc..663a3f2 100644
76454 --- a/include/linux/i2c.h
76455 +++ b/include/linux/i2c.h
76456 @@ -366,6 +366,7 @@ struct i2c_algorithm {
76457 /* To determine what the adapter supports */
76458 u32 (*functionality) (struct i2c_adapter *);
76459 };
76460 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
76461
76462 /**
76463 * struct i2c_bus_recovery_info - I2C bus recovery information
76464 diff --git a/include/linux/i2o.h b/include/linux/i2o.h
76465 index d23c3c2..eb63c81 100644
76466 --- a/include/linux/i2o.h
76467 +++ b/include/linux/i2o.h
76468 @@ -565,7 +565,7 @@ struct i2o_controller {
76469 struct i2o_device *exec; /* Executive */
76470 #if BITS_PER_LONG == 64
76471 spinlock_t context_list_lock; /* lock for context_list */
76472 - atomic_t context_list_counter; /* needed for unique contexts */
76473 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
76474 struct list_head context_list; /* list of context id's
76475 and pointers */
76476 #endif
76477 diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
76478 index aff7ad8..3942bbd 100644
76479 --- a/include/linux/if_pppox.h
76480 +++ b/include/linux/if_pppox.h
76481 @@ -76,7 +76,7 @@ struct pppox_proto {
76482 int (*ioctl)(struct socket *sock, unsigned int cmd,
76483 unsigned long arg);
76484 struct module *owner;
76485 -};
76486 +} __do_const;
76487
76488 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
76489 extern void unregister_pppox_proto(int proto_num);
76490 diff --git a/include/linux/init.h b/include/linux/init.h
76491 index f1c27a71..7d6010e 100644
76492 --- a/include/linux/init.h
76493 +++ b/include/linux/init.h
76494 @@ -39,9 +39,17 @@
76495 * Also note, that this data cannot be "const".
76496 */
76497
76498 +#define add_init_latent_entropy __latent_entropy
76499 +
76500 +#ifdef CONFIG_MEMORY_HOTPLUG
76501 +#define add_meminit_latent_entropy
76502 +#else
76503 +#define add_meminit_latent_entropy __latent_entropy
76504 +#endif
76505 +
76506 /* These are for everybody (although not all archs will actually
76507 discard it in modules) */
76508 -#define __init __section(.init.text) __cold notrace
76509 +#define __init __section(.init.text) __cold notrace add_init_latent_entropy
76510 #define __initdata __section(.init.data)
76511 #define __initconst __constsection(.init.rodata)
76512 #define __exitdata __section(.exit.data)
76513 @@ -102,7 +110,7 @@
76514 #define __cpuexitconst
76515
76516 /* Used for MEMORY_HOTPLUG */
76517 -#define __meminit __section(.meminit.text) __cold notrace
76518 +#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
76519 #define __meminitdata __section(.meminit.data)
76520 #define __meminitconst __constsection(.meminit.rodata)
76521 #define __memexit __section(.memexit.text) __exitused __cold notrace
76522 diff --git a/include/linux/init_task.h b/include/linux/init_task.h
76523 index 5cd0f09..c9f67cc 100644
76524 --- a/include/linux/init_task.h
76525 +++ b/include/linux/init_task.h
76526 @@ -154,6 +154,12 @@ extern struct task_group root_task_group;
76527
76528 #define INIT_TASK_COMM "swapper"
76529
76530 +#ifdef CONFIG_X86
76531 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
76532 +#else
76533 +#define INIT_TASK_THREAD_INFO
76534 +#endif
76535 +
76536 /*
76537 * INIT_TASK is used to set up the first task table, touch at
76538 * your own risk!. Base=0, limit=0x1fffff (=2MB)
76539 @@ -193,6 +199,7 @@ extern struct task_group root_task_group;
76540 RCU_POINTER_INITIALIZER(cred, &init_cred), \
76541 .comm = INIT_TASK_COMM, \
76542 .thread = INIT_THREAD, \
76543 + INIT_TASK_THREAD_INFO \
76544 .fs = &init_fs, \
76545 .files = &init_files, \
76546 .signal = &init_signals, \
76547 diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
76548 index 5e865b5..71bd258 100644
76549 --- a/include/linux/interrupt.h
76550 +++ b/include/linux/interrupt.h
76551 @@ -361,7 +361,7 @@ enum
76552 /* map softirq index to softirq name. update 'softirq_to_name' in
76553 * kernel/softirq.c when adding a new softirq.
76554 */
76555 -extern char *softirq_to_name[NR_SOFTIRQS];
76556 +extern const char * const softirq_to_name[NR_SOFTIRQS];
76557
76558 /* softirq mask and active fields moved to irq_cpustat_t in
76559 * asm/hardirq.h to get better cache usage. KAO
76560 @@ -369,12 +369,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
76561
76562 struct softirq_action
76563 {
76564 - void (*action)(struct softirq_action *);
76565 -};
76566 + void (*action)(void);
76567 +} __no_const;
76568
76569 asmlinkage void do_softirq(void);
76570 asmlinkage void __do_softirq(void);
76571 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
76572 +extern void open_softirq(int nr, void (*action)(void));
76573 extern void softirq_init(void);
76574 extern void __raise_softirq_irqoff(unsigned int nr);
76575
76576 diff --git a/include/linux/iommu.h b/include/linux/iommu.h
76577 index 7ea319e..f9e971d 100644
76578 --- a/include/linux/iommu.h
76579 +++ b/include/linux/iommu.h
76580 @@ -129,7 +129,7 @@ struct iommu_ops {
76581 u32 (*domain_get_windows)(struct iommu_domain *domain);
76582
76583 unsigned long pgsize_bitmap;
76584 -};
76585 +} __do_const;
76586
76587 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
76588 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
76589 diff --git a/include/linux/ioport.h b/include/linux/ioport.h
76590 index 89b7c24..382af74 100644
76591 --- a/include/linux/ioport.h
76592 +++ b/include/linux/ioport.h
76593 @@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
76594 int adjust_resource(struct resource *res, resource_size_t start,
76595 resource_size_t size);
76596 resource_size_t resource_alignment(struct resource *res);
76597 -static inline resource_size_t resource_size(const struct resource *res)
76598 +static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
76599 {
76600 return res->end - res->start + 1;
76601 }
76602 diff --git a/include/linux/irq.h b/include/linux/irq.h
76603 index 56bb0dc..8ae94d62 100644
76604 --- a/include/linux/irq.h
76605 +++ b/include/linux/irq.h
76606 @@ -333,7 +333,8 @@ struct irq_chip {
76607 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
76608
76609 unsigned long flags;
76610 -};
76611 +} __do_const;
76612 +typedef struct irq_chip __no_const irq_chip_no_const;
76613
76614 /*
76615 * irq_chip specific flags
76616 diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
76617 index 0e5d9ec..46acb3a 100644
76618 --- a/include/linux/irqchip/arm-gic.h
76619 +++ b/include/linux/irqchip/arm-gic.h
76620 @@ -59,9 +59,11 @@
76621
76622 #ifndef __ASSEMBLY__
76623
76624 +#include <linux/irq.h>
76625 +
76626 struct device_node;
76627
76628 -extern struct irq_chip gic_arch_extn;
76629 +extern irq_chip_no_const gic_arch_extn;
76630
76631 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
76632 u32 offset, struct device_node *);
76633 diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
76634 index d235e88..8ccbe74 100644
76635 --- a/include/linux/jiffies.h
76636 +++ b/include/linux/jiffies.h
76637 @@ -292,14 +292,14 @@ extern unsigned long preset_lpj;
76638 /*
76639 * Convert various time units to each other:
76640 */
76641 -extern unsigned int jiffies_to_msecs(const unsigned long j);
76642 -extern unsigned int jiffies_to_usecs(const unsigned long j);
76643 -extern unsigned long msecs_to_jiffies(const unsigned int m);
76644 -extern unsigned long usecs_to_jiffies(const unsigned int u);
76645 -extern unsigned long timespec_to_jiffies(const struct timespec *value);
76646 +extern unsigned int jiffies_to_msecs(const unsigned long j) __intentional_overflow(-1);
76647 +extern unsigned int jiffies_to_usecs(const unsigned long j) __intentional_overflow(-1);
76648 +extern unsigned long msecs_to_jiffies(const unsigned int m) __intentional_overflow(-1);
76649 +extern unsigned long usecs_to_jiffies(const unsigned int u) __intentional_overflow(-1);
76650 +extern unsigned long timespec_to_jiffies(const struct timespec *value) __intentional_overflow(-1);
76651 extern void jiffies_to_timespec(const unsigned long jiffies,
76652 struct timespec *value);
76653 -extern unsigned long timeval_to_jiffies(const struct timeval *value);
76654 +extern unsigned long timeval_to_jiffies(const struct timeval *value) __intentional_overflow(-1);
76655 extern void jiffies_to_timeval(const unsigned long jiffies,
76656 struct timeval *value);
76657
76658 diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
76659 index 6883e19..e854fcb 100644
76660 --- a/include/linux/kallsyms.h
76661 +++ b/include/linux/kallsyms.h
76662 @@ -15,7 +15,8 @@
76663
76664 struct module;
76665
76666 -#ifdef CONFIG_KALLSYMS
76667 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
76668 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
76669 /* Lookup the address for a symbol. Returns 0 if not found. */
76670 unsigned long kallsyms_lookup_name(const char *name);
76671
76672 @@ -106,6 +107,21 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
76673 /* Stupid that this does nothing, but I didn't create this mess. */
76674 #define __print_symbol(fmt, addr)
76675 #endif /*CONFIG_KALLSYMS*/
76676 +#else /* when included by kallsyms.c, vsnprintf.c, kprobes.c, or
76677 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
76678 +extern unsigned long kallsyms_lookup_name(const char *name);
76679 +extern void __print_symbol(const char *fmt, unsigned long address);
76680 +extern int sprint_backtrace(char *buffer, unsigned long address);
76681 +extern int sprint_symbol(char *buffer, unsigned long address);
76682 +extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
76683 +const char *kallsyms_lookup(unsigned long addr,
76684 + unsigned long *symbolsize,
76685 + unsigned long *offset,
76686 + char **modname, char *namebuf);
76687 +extern int kallsyms_lookup_size_offset(unsigned long addr,
76688 + unsigned long *symbolsize,
76689 + unsigned long *offset);
76690 +#endif
76691
76692 /* This macro allows us to keep printk typechecking */
76693 static __printf(1, 2)
76694 diff --git a/include/linux/key-type.h b/include/linux/key-type.h
76695 index 518a53a..5e28358 100644
76696 --- a/include/linux/key-type.h
76697 +++ b/include/linux/key-type.h
76698 @@ -125,7 +125,7 @@ struct key_type {
76699 /* internal fields */
76700 struct list_head link; /* link in types list */
76701 struct lock_class_key lock_class; /* key->sem lock class */
76702 -};
76703 +} __do_const;
76704
76705 extern struct key_type key_type_keyring;
76706
76707 diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
76708 index c6e091b..a940adf 100644
76709 --- a/include/linux/kgdb.h
76710 +++ b/include/linux/kgdb.h
76711 @@ -52,7 +52,7 @@ extern int kgdb_connected;
76712 extern int kgdb_io_module_registered;
76713
76714 extern atomic_t kgdb_setting_breakpoint;
76715 -extern atomic_t kgdb_cpu_doing_single_step;
76716 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
76717
76718 extern struct task_struct *kgdb_usethread;
76719 extern struct task_struct *kgdb_contthread;
76720 @@ -254,7 +254,7 @@ struct kgdb_arch {
76721 void (*correct_hw_break)(void);
76722
76723 void (*enable_nmi)(bool on);
76724 -};
76725 +} __do_const;
76726
76727 /**
76728 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
76729 @@ -279,7 +279,7 @@ struct kgdb_io {
76730 void (*pre_exception) (void);
76731 void (*post_exception) (void);
76732 int is_console;
76733 -};
76734 +} __do_const;
76735
76736 extern struct kgdb_arch arch_kgdb_ops;
76737
76738 diff --git a/include/linux/kmod.h b/include/linux/kmod.h
76739 index 0555cc6..40116ce 100644
76740 --- a/include/linux/kmod.h
76741 +++ b/include/linux/kmod.h
76742 @@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
76743 * usually useless though. */
76744 extern __printf(2, 3)
76745 int __request_module(bool wait, const char *name, ...);
76746 +extern __printf(3, 4)
76747 +int ___request_module(bool wait, char *param_name, const char *name, ...);
76748 #define request_module(mod...) __request_module(true, mod)
76749 #define request_module_nowait(mod...) __request_module(false, mod)
76750 #define try_then_request_module(x, mod...) \
76751 @@ -57,6 +59,9 @@ struct subprocess_info {
76752 struct work_struct work;
76753 struct completion *complete;
76754 char *path;
76755 +#ifdef CONFIG_GRKERNSEC
76756 + char *origpath;
76757 +#endif
76758 char **argv;
76759 char **envp;
76760 int wait;
76761 diff --git a/include/linux/kobject.h b/include/linux/kobject.h
76762 index de6dcbcc..4735f88 100644
76763 --- a/include/linux/kobject.h
76764 +++ b/include/linux/kobject.h
76765 @@ -115,7 +115,7 @@ struct kobj_type {
76766 struct attribute **default_attrs;
76767 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
76768 const void *(*namespace)(struct kobject *kobj);
76769 -};
76770 +} __do_const;
76771
76772 struct kobj_uevent_env {
76773 char *envp[UEVENT_NUM_ENVP];
76774 @@ -138,6 +138,7 @@ struct kobj_attribute {
76775 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
76776 const char *buf, size_t count);
76777 };
76778 +typedef struct kobj_attribute __no_const kobj_attribute_no_const;
76779
76780 extern const struct sysfs_ops kobj_sysfs_ops;
76781
76782 diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
76783 index df32d25..fb52e27 100644
76784 --- a/include/linux/kobject_ns.h
76785 +++ b/include/linux/kobject_ns.h
76786 @@ -44,7 +44,7 @@ struct kobj_ns_type_operations {
76787 const void *(*netlink_ns)(struct sock *sk);
76788 const void *(*initial_ns)(void);
76789 void (*drop_ns)(void *);
76790 -};
76791 +} __do_const;
76792
76793 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
76794 int kobj_ns_type_registered(enum kobj_ns_type type);
76795 diff --git a/include/linux/kref.h b/include/linux/kref.h
76796 index 484604d..0f6c5b6 100644
76797 --- a/include/linux/kref.h
76798 +++ b/include/linux/kref.h
76799 @@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
76800 static inline int kref_sub(struct kref *kref, unsigned int count,
76801 void (*release)(struct kref *kref))
76802 {
76803 - WARN_ON(release == NULL);
76804 + BUG_ON(release == NULL);
76805
76806 if (atomic_sub_and_test((int) count, &kref->refcount)) {
76807 release(kref);
76808 diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
76809 index 0fbbc7a..db081e3 100644
76810 --- a/include/linux/kvm_host.h
76811 +++ b/include/linux/kvm_host.h
76812 @@ -458,7 +458,7 @@ static inline void kvm_irqfd_exit(void)
76813 {
76814 }
76815 #endif
76816 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
76817 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
76818 struct module *module);
76819 void kvm_exit(void);
76820
76821 @@ -632,7 +632,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
76822 struct kvm_guest_debug *dbg);
76823 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
76824
76825 -int kvm_arch_init(void *opaque);
76826 +int kvm_arch_init(const void *opaque);
76827 void kvm_arch_exit(void);
76828
76829 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
76830 diff --git a/include/linux/libata.h b/include/linux/libata.h
76831 index 0e23c26..6ad8c33 100644
76832 --- a/include/linux/libata.h
76833 +++ b/include/linux/libata.h
76834 @@ -972,7 +972,7 @@ struct ata_port_operations {
76835 * fields must be pointers.
76836 */
76837 const struct ata_port_operations *inherits;
76838 -};
76839 +} __do_const;
76840
76841 struct ata_port_info {
76842 unsigned long flags;
76843 diff --git a/include/linux/linkage.h b/include/linux/linkage.h
76844 index d3e8ad2..a949f68 100644
76845 --- a/include/linux/linkage.h
76846 +++ b/include/linux/linkage.h
76847 @@ -31,6 +31,7 @@
76848 #endif
76849
76850 #define __page_aligned_data __section(.data..page_aligned) __aligned(PAGE_SIZE)
76851 +#define __page_aligned_rodata __read_only __aligned(PAGE_SIZE)
76852 #define __page_aligned_bss __section(.bss..page_aligned) __aligned(PAGE_SIZE)
76853
76854 /*
76855 diff --git a/include/linux/list.h b/include/linux/list.h
76856 index f4d8a2f..38e6e46 100644
76857 --- a/include/linux/list.h
76858 +++ b/include/linux/list.h
76859 @@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
76860 extern void list_del(struct list_head *entry);
76861 #endif
76862
76863 +extern void __pax_list_add(struct list_head *new,
76864 + struct list_head *prev,
76865 + struct list_head *next);
76866 +static inline void pax_list_add(struct list_head *new, struct list_head *head)
76867 +{
76868 + __pax_list_add(new, head, head->next);
76869 +}
76870 +static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
76871 +{
76872 + __pax_list_add(new, head->prev, head);
76873 +}
76874 +extern void pax_list_del(struct list_head *entry);
76875 +
76876 /**
76877 * list_replace - replace old entry by new one
76878 * @old : the element to be replaced
76879 @@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
76880 INIT_LIST_HEAD(entry);
76881 }
76882
76883 +extern void pax_list_del_init(struct list_head *entry);
76884 +
76885 /**
76886 * list_move - delete from one list and add as another's head
76887 * @list: the entry to move
76888 diff --git a/include/linux/math64.h b/include/linux/math64.h
76889 index 69ed5f5..243ed51 100644
76890 --- a/include/linux/math64.h
76891 +++ b/include/linux/math64.h
76892 @@ -15,7 +15,7 @@
76893 * This is commonly provided by 32bit archs to provide an optimized 64bit
76894 * divide.
76895 */
76896 -static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
76897 +static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
76898 {
76899 *remainder = dividend % divisor;
76900 return dividend / divisor;
76901 @@ -42,7 +42,7 @@ static inline u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder)
76902 /**
76903 * div64_u64 - unsigned 64bit divide with 64bit divisor
76904 */
76905 -static inline u64 div64_u64(u64 dividend, u64 divisor)
76906 +static inline u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
76907 {
76908 return dividend / divisor;
76909 }
76910 @@ -61,7 +61,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
76911 #define div64_ul(x, y) div_u64((x), (y))
76912
76913 #ifndef div_u64_rem
76914 -static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
76915 +static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
76916 {
76917 *remainder = do_div(dividend, divisor);
76918 return dividend;
76919 @@ -77,7 +77,7 @@ extern u64 div64_u64_rem(u64 dividend, u64 divisor, u64 *remainder);
76920 #endif
76921
76922 #ifndef div64_u64
76923 -extern u64 div64_u64(u64 dividend, u64 divisor);
76924 +extern u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor);
76925 #endif
76926
76927 #ifndef div64_s64
76928 @@ -94,7 +94,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
76929 * divide.
76930 */
76931 #ifndef div_u64
76932 -static inline u64 div_u64(u64 dividend, u32 divisor)
76933 +static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
76934 {
76935 u32 remainder;
76936 return div_u64_rem(dividend, divisor, &remainder);
76937 diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
76938 index da6716b..2e31db3 100644
76939 --- a/include/linux/mempolicy.h
76940 +++ b/include/linux/mempolicy.h
76941 @@ -91,6 +91,10 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
76942 }
76943
76944 #define vma_policy(vma) ((vma)->vm_policy)
76945 +static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
76946 +{
76947 + vma->vm_policy = pol;
76948 +}
76949
76950 static inline void mpol_get(struct mempolicy *pol)
76951 {
76952 @@ -240,6 +244,9 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
76953 }
76954
76955 #define vma_policy(vma) NULL
76956 +static inline void set_vma_policy(struct vm_area_struct *vma, struct mempolicy *pol)
76957 +{
76958 +}
76959
76960 static inline int
76961 vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
76962 diff --git a/include/linux/mm.h b/include/linux/mm.h
76963 index 8b6e55e..c4edf39 100644
76964 --- a/include/linux/mm.h
76965 +++ b/include/linux/mm.h
76966 @@ -113,6 +113,11 @@ extern unsigned int kobjsize(const void *objp);
76967 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
76968 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
76969 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
76970 +
76971 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
76972 +#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
76973 +#endif
76974 +
76975 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
76976
76977 #ifdef CONFIG_MEM_SOFT_DIRTY
76978 @@ -215,8 +220,8 @@ struct vm_operations_struct {
76979 /* called by access_process_vm when get_user_pages() fails, typically
76980 * for use by special VMAs that can switch between memory and hardware
76981 */
76982 - int (*access)(struct vm_area_struct *vma, unsigned long addr,
76983 - void *buf, int len, int write);
76984 + ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
76985 + void *buf, size_t len, int write);
76986 #ifdef CONFIG_NUMA
76987 /*
76988 * set_policy() op must add a reference to any non-NULL @new mempolicy
76989 @@ -246,6 +251,7 @@ struct vm_operations_struct {
76990 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
76991 unsigned long size, pgoff_t pgoff);
76992 };
76993 +typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
76994
76995 struct mmu_gather;
76996 struct inode;
76997 @@ -977,8 +983,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
76998 unsigned long *pfn);
76999 int follow_phys(struct vm_area_struct *vma, unsigned long address,
77000 unsigned int flags, unsigned long *prot, resource_size_t *phys);
77001 -int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
77002 - void *buf, int len, int write);
77003 +ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
77004 + void *buf, size_t len, int write);
77005
77006 static inline void unmap_shared_mapping_range(struct address_space *mapping,
77007 loff_t const holebegin, loff_t const holelen)
77008 @@ -1017,9 +1023,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
77009 }
77010 #endif
77011
77012 -extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
77013 -extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
77014 - void *buf, int len, int write);
77015 +extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
77016 +extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
77017 + void *buf, size_t len, int write);
77018
77019 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
77020 unsigned long start, unsigned long nr_pages,
77021 @@ -1051,34 +1057,6 @@ int set_page_dirty(struct page *page);
77022 int set_page_dirty_lock(struct page *page);
77023 int clear_page_dirty_for_io(struct page *page);
77024
77025 -/* Is the vma a continuation of the stack vma above it? */
77026 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
77027 -{
77028 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
77029 -}
77030 -
77031 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
77032 - unsigned long addr)
77033 -{
77034 - return (vma->vm_flags & VM_GROWSDOWN) &&
77035 - (vma->vm_start == addr) &&
77036 - !vma_growsdown(vma->vm_prev, addr);
77037 -}
77038 -
77039 -/* Is the vma a continuation of the stack vma below it? */
77040 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
77041 -{
77042 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
77043 -}
77044 -
77045 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
77046 - unsigned long addr)
77047 -{
77048 - return (vma->vm_flags & VM_GROWSUP) &&
77049 - (vma->vm_end == addr) &&
77050 - !vma_growsup(vma->vm_next, addr);
77051 -}
77052 -
77053 extern pid_t
77054 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
77055
77056 @@ -1178,6 +1156,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
77057 }
77058 #endif
77059
77060 +#ifdef CONFIG_MMU
77061 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
77062 +#else
77063 +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
77064 +{
77065 + return __pgprot(0);
77066 +}
77067 +#endif
77068 +
77069 int vma_wants_writenotify(struct vm_area_struct *vma);
77070
77071 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
77072 @@ -1196,8 +1183,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
77073 {
77074 return 0;
77075 }
77076 +
77077 +static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
77078 + unsigned long address)
77079 +{
77080 + return 0;
77081 +}
77082 #else
77083 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
77084 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
77085 #endif
77086
77087 #ifdef __PAGETABLE_PMD_FOLDED
77088 @@ -1206,8 +1200,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
77089 {
77090 return 0;
77091 }
77092 +
77093 +static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
77094 + unsigned long address)
77095 +{
77096 + return 0;
77097 +}
77098 #else
77099 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
77100 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
77101 #endif
77102
77103 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
77104 @@ -1225,11 +1226,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
77105 NULL: pud_offset(pgd, address);
77106 }
77107
77108 +static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
77109 +{
77110 + return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
77111 + NULL: pud_offset(pgd, address);
77112 +}
77113 +
77114 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
77115 {
77116 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
77117 NULL: pmd_offset(pud, address);
77118 }
77119 +
77120 +static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
77121 +{
77122 + return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
77123 + NULL: pmd_offset(pud, address);
77124 +}
77125 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
77126
77127 #if USE_SPLIT_PTLOCKS
77128 @@ -1517,7 +1530,7 @@ extern int install_special_mapping(struct mm_struct *mm,
77129 unsigned long addr, unsigned long len,
77130 unsigned long flags, struct page **pages);
77131
77132 -extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
77133 +extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long) __intentional_overflow(-1);
77134
77135 extern unsigned long mmap_region(struct file *file, unsigned long addr,
77136 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
77137 @@ -1525,6 +1538,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77138 unsigned long len, unsigned long prot, unsigned long flags,
77139 unsigned long pgoff, unsigned long *populate);
77140 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
77141 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
77142
77143 #ifdef CONFIG_MMU
77144 extern int __mm_populate(unsigned long addr, unsigned long len,
77145 @@ -1553,10 +1567,11 @@ struct vm_unmapped_area_info {
77146 unsigned long high_limit;
77147 unsigned long align_mask;
77148 unsigned long align_offset;
77149 + unsigned long threadstack_offset;
77150 };
77151
77152 -extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
77153 -extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
77154 +extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
77155 +extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
77156
77157 /*
77158 * Search for an unmapped address range.
77159 @@ -1568,7 +1583,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
77160 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
77161 */
77162 static inline unsigned long
77163 -vm_unmapped_area(struct vm_unmapped_area_info *info)
77164 +vm_unmapped_area(const struct vm_unmapped_area_info *info)
77165 {
77166 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
77167 return unmapped_area(info);
77168 @@ -1631,6 +1646,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
77169 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
77170 struct vm_area_struct **pprev);
77171
77172 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
77173 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
77174 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
77175 +
77176 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
77177 NULL if none. Assume start_addr < end_addr. */
77178 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
77179 @@ -1659,15 +1678,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
77180 return vma;
77181 }
77182
77183 -#ifdef CONFIG_MMU
77184 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
77185 -#else
77186 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
77187 -{
77188 - return __pgprot(0);
77189 -}
77190 -#endif
77191 -
77192 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
77193 unsigned long change_prot_numa(struct vm_area_struct *vma,
77194 unsigned long start, unsigned long end);
77195 @@ -1719,6 +1729,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
77196 static inline void vm_stat_account(struct mm_struct *mm,
77197 unsigned long flags, struct file *file, long pages)
77198 {
77199 +
77200 +#ifdef CONFIG_PAX_RANDMMAP
77201 + if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
77202 +#endif
77203 +
77204 mm->total_vm += pages;
77205 }
77206 #endif /* CONFIG_PROC_FS */
77207 @@ -1800,7 +1815,7 @@ extern int unpoison_memory(unsigned long pfn);
77208 extern int sysctl_memory_failure_early_kill;
77209 extern int sysctl_memory_failure_recovery;
77210 extern void shake_page(struct page *p, int access);
77211 -extern atomic_long_t num_poisoned_pages;
77212 +extern atomic_long_unchecked_t num_poisoned_pages;
77213 extern int soft_offline_page(struct page *page, int flags);
77214
77215 extern void dump_page(struct page *page);
77216 @@ -1837,5 +1852,11 @@ void __init setup_nr_node_ids(void);
77217 static inline void setup_nr_node_ids(void) {}
77218 #endif
77219
77220 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
77221 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
77222 +#else
77223 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
77224 +#endif
77225 +
77226 #endif /* __KERNEL__ */
77227 #endif /* _LINUX_MM_H */
77228 diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
77229 index d9851ee..619492d 100644
77230 --- a/include/linux/mm_types.h
77231 +++ b/include/linux/mm_types.h
77232 @@ -289,6 +289,8 @@ struct vm_area_struct {
77233 #ifdef CONFIG_NUMA
77234 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
77235 #endif
77236 +
77237 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
77238 };
77239
77240 struct core_thread {
77241 @@ -436,6 +438,24 @@ struct mm_struct {
77242 int first_nid;
77243 #endif
77244 struct uprobes_state uprobes_state;
77245 +
77246 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
77247 + unsigned long pax_flags;
77248 +#endif
77249 +
77250 +#ifdef CONFIG_PAX_DLRESOLVE
77251 + unsigned long call_dl_resolve;
77252 +#endif
77253 +
77254 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
77255 + unsigned long call_syscall;
77256 +#endif
77257 +
77258 +#ifdef CONFIG_PAX_ASLR
77259 + unsigned long delta_mmap; /* randomized offset */
77260 + unsigned long delta_stack; /* randomized offset */
77261 +#endif
77262 +
77263 };
77264
77265 /* first nid will either be a valid NID or one of these values */
77266 diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
77267 index c5d5278..f0b68c8 100644
77268 --- a/include/linux/mmiotrace.h
77269 +++ b/include/linux/mmiotrace.h
77270 @@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
77271 /* Called from ioremap.c */
77272 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
77273 void __iomem *addr);
77274 -extern void mmiotrace_iounmap(volatile void __iomem *addr);
77275 +extern void mmiotrace_iounmap(const volatile void __iomem *addr);
77276
77277 /* For anyone to insert markers. Remember trailing newline. */
77278 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
77279 @@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
77280 {
77281 }
77282
77283 -static inline void mmiotrace_iounmap(volatile void __iomem *addr)
77284 +static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
77285 {
77286 }
77287
77288 diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
77289 index bd791e4..8617c34f 100644
77290 --- a/include/linux/mmzone.h
77291 +++ b/include/linux/mmzone.h
77292 @@ -396,7 +396,7 @@ struct zone {
77293 unsigned long flags; /* zone flags, see below */
77294
77295 /* Zone statistics */
77296 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
77297 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
77298
77299 /*
77300 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
77301 diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
77302 index 45e9214..a7227d6 100644
77303 --- a/include/linux/mod_devicetable.h
77304 +++ b/include/linux/mod_devicetable.h
77305 @@ -13,7 +13,7 @@
77306 typedef unsigned long kernel_ulong_t;
77307 #endif
77308
77309 -#define PCI_ANY_ID (~0)
77310 +#define PCI_ANY_ID ((__u16)~0)
77311
77312 struct pci_device_id {
77313 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
77314 @@ -139,7 +139,7 @@ struct usb_device_id {
77315 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
77316 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
77317
77318 -#define HID_ANY_ID (~0)
77319 +#define HID_ANY_ID (~0U)
77320 #define HID_BUS_ANY 0xffff
77321 #define HID_GROUP_ANY 0x0000
77322
77323 @@ -467,7 +467,7 @@ struct dmi_system_id {
77324 const char *ident;
77325 struct dmi_strmatch matches[4];
77326 void *driver_data;
77327 -};
77328 +} __do_const;
77329 /*
77330 * struct dmi_device_id appears during expansion of
77331 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
77332 diff --git a/include/linux/module.h b/include/linux/module.h
77333 index 05f2447..2aee07c 100644
77334 --- a/include/linux/module.h
77335 +++ b/include/linux/module.h
77336 @@ -17,9 +17,11 @@
77337 #include <linux/moduleparam.h>
77338 #include <linux/tracepoint.h>
77339 #include <linux/export.h>
77340 +#include <linux/fs.h>
77341
77342 #include <linux/percpu.h>
77343 #include <asm/module.h>
77344 +#include <asm/pgtable.h>
77345
77346 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
77347 #define MODULE_SIG_STRING "~Module signature appended~\n"
77348 @@ -55,12 +57,13 @@ struct module_attribute {
77349 int (*test)(struct module *);
77350 void (*free)(struct module *);
77351 };
77352 +typedef struct module_attribute __no_const module_attribute_no_const;
77353
77354 struct module_version_attribute {
77355 struct module_attribute mattr;
77356 const char *module_name;
77357 const char *version;
77358 -} __attribute__ ((__aligned__(sizeof(void *))));
77359 +} __do_const __attribute__ ((__aligned__(sizeof(void *))));
77360
77361 extern ssize_t __modver_version_show(struct module_attribute *,
77362 struct module_kobject *, char *);
77363 @@ -238,7 +241,7 @@ struct module
77364
77365 /* Sysfs stuff. */
77366 struct module_kobject mkobj;
77367 - struct module_attribute *modinfo_attrs;
77368 + module_attribute_no_const *modinfo_attrs;
77369 const char *version;
77370 const char *srcversion;
77371 struct kobject *holders_dir;
77372 @@ -287,19 +290,16 @@ struct module
77373 int (*init)(void);
77374
77375 /* If this is non-NULL, vfree after init() returns */
77376 - void *module_init;
77377 + void *module_init_rx, *module_init_rw;
77378
77379 /* Here is the actual code + data, vfree'd on unload. */
77380 - void *module_core;
77381 + void *module_core_rx, *module_core_rw;
77382
77383 /* Here are the sizes of the init and core sections */
77384 - unsigned int init_size, core_size;
77385 + unsigned int init_size_rw, core_size_rw;
77386
77387 /* The size of the executable code in each section. */
77388 - unsigned int init_text_size, core_text_size;
77389 -
77390 - /* Size of RO sections of the module (text+rodata) */
77391 - unsigned int init_ro_size, core_ro_size;
77392 + unsigned int init_size_rx, core_size_rx;
77393
77394 /* Arch-specific module values */
77395 struct mod_arch_specific arch;
77396 @@ -355,6 +355,10 @@ struct module
77397 #ifdef CONFIG_EVENT_TRACING
77398 struct ftrace_event_call **trace_events;
77399 unsigned int num_trace_events;
77400 + struct file_operations trace_id;
77401 + struct file_operations trace_enable;
77402 + struct file_operations trace_format;
77403 + struct file_operations trace_filter;
77404 #endif
77405 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
77406 unsigned int num_ftrace_callsites;
77407 @@ -402,16 +406,46 @@ bool is_module_address(unsigned long addr);
77408 bool is_module_percpu_address(unsigned long addr);
77409 bool is_module_text_address(unsigned long addr);
77410
77411 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
77412 +{
77413 +
77414 +#ifdef CONFIG_PAX_KERNEXEC
77415 + if (ktla_ktva(addr) >= (unsigned long)start &&
77416 + ktla_ktva(addr) < (unsigned long)start + size)
77417 + return 1;
77418 +#endif
77419 +
77420 + return ((void *)addr >= start && (void *)addr < start + size);
77421 +}
77422 +
77423 +static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
77424 +{
77425 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
77426 +}
77427 +
77428 +static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
77429 +{
77430 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
77431 +}
77432 +
77433 +static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
77434 +{
77435 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
77436 +}
77437 +
77438 +static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
77439 +{
77440 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
77441 +}
77442 +
77443 static inline int within_module_core(unsigned long addr, const struct module *mod)
77444 {
77445 - return (unsigned long)mod->module_core <= addr &&
77446 - addr < (unsigned long)mod->module_core + mod->core_size;
77447 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
77448 }
77449
77450 static inline int within_module_init(unsigned long addr, const struct module *mod)
77451 {
77452 - return (unsigned long)mod->module_init <= addr &&
77453 - addr < (unsigned long)mod->module_init + mod->init_size;
77454 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
77455 }
77456
77457 /* Search for module by name: must hold module_mutex. */
77458 diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
77459 index 560ca53..ef621ef 100644
77460 --- a/include/linux/moduleloader.h
77461 +++ b/include/linux/moduleloader.h
77462 @@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
77463 sections. Returns NULL on failure. */
77464 void *module_alloc(unsigned long size);
77465
77466 +#ifdef CONFIG_PAX_KERNEXEC
77467 +void *module_alloc_exec(unsigned long size);
77468 +#else
77469 +#define module_alloc_exec(x) module_alloc(x)
77470 +#endif
77471 +
77472 /* Free memory returned from module_alloc. */
77473 void module_free(struct module *mod, void *module_region);
77474
77475 +#ifdef CONFIG_PAX_KERNEXEC
77476 +void module_free_exec(struct module *mod, void *module_region);
77477 +#else
77478 +#define module_free_exec(x, y) module_free((x), (y))
77479 +#endif
77480 +
77481 /*
77482 * Apply the given relocation to the (simplified) ELF. Return -error
77483 * or 0.
77484 @@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
77485 unsigned int relsec,
77486 struct module *me)
77487 {
77488 +#ifdef CONFIG_MODULES
77489 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
77490 +#endif
77491 return -ENOEXEC;
77492 }
77493 #endif
77494 @@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
77495 unsigned int relsec,
77496 struct module *me)
77497 {
77498 +#ifdef CONFIG_MODULES
77499 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
77500 +#endif
77501 return -ENOEXEC;
77502 }
77503 #endif
77504 diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
77505 index c3eb102..073c4a6 100644
77506 --- a/include/linux/moduleparam.h
77507 +++ b/include/linux/moduleparam.h
77508 @@ -295,7 +295,7 @@ static inline void __kernel_param_unlock(void)
77509 * @len is usually just sizeof(string).
77510 */
77511 #define module_param_string(name, string, len, perm) \
77512 - static const struct kparam_string __param_string_##name \
77513 + static const struct kparam_string __param_string_##name __used \
77514 = { len, string }; \
77515 __module_param_call(MODULE_PARAM_PREFIX, name, \
77516 &param_ops_string, \
77517 @@ -434,7 +434,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
77518 */
77519 #define module_param_array_named(name, array, type, nump, perm) \
77520 param_check_##type(name, &(array)[0]); \
77521 - static const struct kparam_array __param_arr_##name \
77522 + static const struct kparam_array __param_arr_##name __used \
77523 = { .max = ARRAY_SIZE(array), .num = nump, \
77524 .ops = &param_ops_##type, \
77525 .elemsize = sizeof(array[0]), .elem = array }; \
77526 diff --git a/include/linux/namei.h b/include/linux/namei.h
77527 index 8e47bc7..c70fd73 100644
77528 --- a/include/linux/namei.h
77529 +++ b/include/linux/namei.h
77530 @@ -19,7 +19,7 @@ struct nameidata {
77531 unsigned seq;
77532 int last_type;
77533 unsigned depth;
77534 - char *saved_names[MAX_NESTED_LINKS + 1];
77535 + const char *saved_names[MAX_NESTED_LINKS + 1];
77536 };
77537
77538 /*
77539 @@ -83,12 +83,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
77540
77541 extern void nd_jump_link(struct nameidata *nd, struct path *path);
77542
77543 -static inline void nd_set_link(struct nameidata *nd, char *path)
77544 +static inline void nd_set_link(struct nameidata *nd, const char *path)
77545 {
77546 nd->saved_names[nd->depth] = path;
77547 }
77548
77549 -static inline char *nd_get_link(struct nameidata *nd)
77550 +static inline const char *nd_get_link(const struct nameidata *nd)
77551 {
77552 return nd->saved_names[nd->depth];
77553 }
77554 diff --git a/include/linux/net.h b/include/linux/net.h
77555 index 8bd9d92..08b1c20 100644
77556 --- a/include/linux/net.h
77557 +++ b/include/linux/net.h
77558 @@ -191,7 +191,7 @@ struct net_proto_family {
77559 int (*create)(struct net *net, struct socket *sock,
77560 int protocol, int kern);
77561 struct module *owner;
77562 -};
77563 +} __do_const;
77564
77565 struct iovec;
77566 struct kvec;
77567 diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
77568 index 25f5d2d1..5cf2120 100644
77569 --- a/include/linux/netdevice.h
77570 +++ b/include/linux/netdevice.h
77571 @@ -1098,6 +1098,7 @@ struct net_device_ops {
77572 sa_family_t sa_family,
77573 __be16 port);
77574 };
77575 +typedef struct net_device_ops __no_const net_device_ops_no_const;
77576
77577 /*
77578 * The DEVICE structure.
77579 @@ -1169,7 +1170,7 @@ struct net_device {
77580 int iflink;
77581
77582 struct net_device_stats stats;
77583 - atomic_long_t rx_dropped; /* dropped packets by core network
77584 + atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
77585 * Do not use this in drivers.
77586 */
77587
77588 diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
77589 index 708fe72ab9..77084a3 100644
77590 --- a/include/linux/netfilter.h
77591 +++ b/include/linux/netfilter.h
77592 @@ -82,7 +82,7 @@ struct nf_sockopt_ops {
77593 #endif
77594 /* Use the module struct to lock set/get code in place */
77595 struct module *owner;
77596 -};
77597 +} __do_const;
77598
77599 /* Function to register/unregister hook points. */
77600 int nf_register_hook(struct nf_hook_ops *reg);
77601 diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
77602 index cadb740..d7c37c0 100644
77603 --- a/include/linux/netfilter/nfnetlink.h
77604 +++ b/include/linux/netfilter/nfnetlink.h
77605 @@ -16,7 +16,7 @@ struct nfnl_callback {
77606 const struct nlattr * const cda[]);
77607 const struct nla_policy *policy; /* netlink attribute policy */
77608 const u_int16_t attr_count; /* number of nlattr's */
77609 -};
77610 +} __do_const;
77611
77612 struct nfnetlink_subsystem {
77613 const char *name;
77614 diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
77615 new file mode 100644
77616 index 0000000..33f4af8
77617 --- /dev/null
77618 +++ b/include/linux/netfilter/xt_gradm.h
77619 @@ -0,0 +1,9 @@
77620 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
77621 +#define _LINUX_NETFILTER_XT_GRADM_H 1
77622 +
77623 +struct xt_gradm_mtinfo {
77624 + __u16 flags;
77625 + __u16 invflags;
77626 +};
77627 +
77628 +#endif
77629 diff --git a/include/linux/nls.h b/include/linux/nls.h
77630 index 5dc635f..35f5e11 100644
77631 --- a/include/linux/nls.h
77632 +++ b/include/linux/nls.h
77633 @@ -31,7 +31,7 @@ struct nls_table {
77634 const unsigned char *charset2upper;
77635 struct module *owner;
77636 struct nls_table *next;
77637 -};
77638 +} __do_const;
77639
77640 /* this value hold the maximum octet of charset */
77641 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
77642 diff --git a/include/linux/notifier.h b/include/linux/notifier.h
77643 index d14a4c3..a078786 100644
77644 --- a/include/linux/notifier.h
77645 +++ b/include/linux/notifier.h
77646 @@ -54,7 +54,8 @@ struct notifier_block {
77647 notifier_fn_t notifier_call;
77648 struct notifier_block __rcu *next;
77649 int priority;
77650 -};
77651 +} __do_const;
77652 +typedef struct notifier_block __no_const notifier_block_no_const;
77653
77654 struct atomic_notifier_head {
77655 spinlock_t lock;
77656 diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
77657 index b2a0f15..4d7da32 100644
77658 --- a/include/linux/oprofile.h
77659 +++ b/include/linux/oprofile.h
77660 @@ -138,9 +138,9 @@ int oprofilefs_create_ulong(struct dentry * root,
77661 int oprofilefs_create_ro_ulong(struct dentry * root,
77662 char const * name, ulong * val);
77663
77664 -/** Create a file for read-only access to an atomic_t. */
77665 +/** Create a file for read-only access to an atomic_unchecked_t. */
77666 int oprofilefs_create_ro_atomic(struct dentry * root,
77667 - char const * name, atomic_t * val);
77668 + char const * name, atomic_unchecked_t * val);
77669
77670 /** create a directory */
77671 struct dentry *oprofilefs_mkdir(struct dentry *parent, char const *name);
77672 diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
77673 index 430dd96..544e26e 100644
77674 --- a/include/linux/pci_hotplug.h
77675 +++ b/include/linux/pci_hotplug.h
77676 @@ -71,7 +71,8 @@ struct hotplug_slot_ops {
77677 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
77678 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
77679 int (*reset_slot) (struct hotplug_slot *slot, int probe);
77680 -};
77681 +} __do_const;
77682 +typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
77683
77684 /**
77685 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
77686 diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
77687 index c8ba627..24bdfa8 100644
77688 --- a/include/linux/perf_event.h
77689 +++ b/include/linux/perf_event.h
77690 @@ -327,8 +327,8 @@ struct perf_event {
77691
77692 enum perf_event_active_state state;
77693 unsigned int attach_state;
77694 - local64_t count;
77695 - atomic64_t child_count;
77696 + local64_t count; /* PaX: fix it one day */
77697 + atomic64_unchecked_t child_count;
77698
77699 /*
77700 * These are the total time in nanoseconds that the event
77701 @@ -379,8 +379,8 @@ struct perf_event {
77702 * These accumulate total time (in nanoseconds) that children
77703 * events have been enabled and running, respectively.
77704 */
77705 - atomic64_t child_total_time_enabled;
77706 - atomic64_t child_total_time_running;
77707 + atomic64_unchecked_t child_total_time_enabled;
77708 + atomic64_unchecked_t child_total_time_running;
77709
77710 /*
77711 * Protect attach/detach and child_list:
77712 @@ -702,7 +702,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
77713 entry->ip[entry->nr++] = ip;
77714 }
77715
77716 -extern int sysctl_perf_event_paranoid;
77717 +extern int sysctl_perf_event_legitimately_concerned;
77718 extern int sysctl_perf_event_mlock;
77719 extern int sysctl_perf_event_sample_rate;
77720 extern int sysctl_perf_cpu_time_max_percent;
77721 @@ -717,19 +717,24 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
77722 loff_t *ppos);
77723
77724
77725 +static inline bool perf_paranoid_any(void)
77726 +{
77727 + return sysctl_perf_event_legitimately_concerned > 2;
77728 +}
77729 +
77730 static inline bool perf_paranoid_tracepoint_raw(void)
77731 {
77732 - return sysctl_perf_event_paranoid > -1;
77733 + return sysctl_perf_event_legitimately_concerned > -1;
77734 }
77735
77736 static inline bool perf_paranoid_cpu(void)
77737 {
77738 - return sysctl_perf_event_paranoid > 0;
77739 + return sysctl_perf_event_legitimately_concerned > 0;
77740 }
77741
77742 static inline bool perf_paranoid_kernel(void)
77743 {
77744 - return sysctl_perf_event_paranoid > 1;
77745 + return sysctl_perf_event_legitimately_concerned > 1;
77746 }
77747
77748 extern void perf_event_init(void);
77749 @@ -845,7 +850,7 @@ struct perf_pmu_events_attr {
77750 struct device_attribute attr;
77751 u64 id;
77752 const char *event_str;
77753 -};
77754 +} __do_const;
77755
77756 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
77757 static struct perf_pmu_events_attr _var = { \
77758 diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
77759 index b8809fe..ae4ccd0 100644
77760 --- a/include/linux/pipe_fs_i.h
77761 +++ b/include/linux/pipe_fs_i.h
77762 @@ -47,10 +47,10 @@ struct pipe_inode_info {
77763 struct mutex mutex;
77764 wait_queue_head_t wait;
77765 unsigned int nrbufs, curbuf, buffers;
77766 - unsigned int readers;
77767 - unsigned int writers;
77768 - unsigned int files;
77769 - unsigned int waiting_writers;
77770 + atomic_t readers;
77771 + atomic_t writers;
77772 + atomic_t files;
77773 + atomic_t waiting_writers;
77774 unsigned int r_counter;
77775 unsigned int w_counter;
77776 struct page *tmp_page;
77777 diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
77778 index 5f28cae..3d23723 100644
77779 --- a/include/linux/platform_data/usb-ehci-s5p.h
77780 +++ b/include/linux/platform_data/usb-ehci-s5p.h
77781 @@ -14,7 +14,7 @@
77782 struct s5p_ehci_platdata {
77783 int (*phy_init)(struct platform_device *pdev, int type);
77784 int (*phy_exit)(struct platform_device *pdev, int type);
77785 -};
77786 +} __no_const;
77787
77788 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
77789
77790 diff --git a/include/linux/platform_data/usb-ohci-exynos.h b/include/linux/platform_data/usb-ohci-exynos.h
77791 index c256c59..8ea94c7 100644
77792 --- a/include/linux/platform_data/usb-ohci-exynos.h
77793 +++ b/include/linux/platform_data/usb-ohci-exynos.h
77794 @@ -14,7 +14,7 @@
77795 struct exynos4_ohci_platdata {
77796 int (*phy_init)(struct platform_device *pdev, int type);
77797 int (*phy_exit)(struct platform_device *pdev, int type);
77798 -};
77799 +} __no_const;
77800
77801 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
77802
77803 diff --git a/include/linux/pm.h b/include/linux/pm.h
77804 index a224c7f..92d8a97 100644
77805 --- a/include/linux/pm.h
77806 +++ b/include/linux/pm.h
77807 @@ -576,6 +576,7 @@ extern int dev_pm_put_subsys_data(struct device *dev);
77808 struct dev_pm_domain {
77809 struct dev_pm_ops ops;
77810 };
77811 +typedef struct dev_pm_domain __no_const dev_pm_domain_no_const;
77812
77813 /*
77814 * The PM_EVENT_ messages are also used by drivers implementing the legacy
77815 diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
77816 index 7c1d252..c5c773e 100644
77817 --- a/include/linux/pm_domain.h
77818 +++ b/include/linux/pm_domain.h
77819 @@ -48,7 +48,7 @@ struct gpd_dev_ops {
77820
77821 struct gpd_cpu_data {
77822 unsigned int saved_exit_latency;
77823 - struct cpuidle_state *idle_state;
77824 + cpuidle_state_no_const *idle_state;
77825 };
77826
77827 struct generic_pm_domain {
77828 diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
77829 index 6fa7cea..7bf6415 100644
77830 --- a/include/linux/pm_runtime.h
77831 +++ b/include/linux/pm_runtime.h
77832 @@ -103,7 +103,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
77833
77834 static inline void pm_runtime_mark_last_busy(struct device *dev)
77835 {
77836 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
77837 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
77838 }
77839
77840 #else /* !CONFIG_PM_RUNTIME */
77841 diff --git a/include/linux/pnp.h b/include/linux/pnp.h
77842 index 195aafc..49a7bc2 100644
77843 --- a/include/linux/pnp.h
77844 +++ b/include/linux/pnp.h
77845 @@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
77846 struct pnp_fixup {
77847 char id[7];
77848 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
77849 -};
77850 +} __do_const;
77851
77852 /* config parameters */
77853 #define PNP_CONFIG_NORMAL 0x0001
77854 diff --git a/include/linux/poison.h b/include/linux/poison.h
77855 index 2110a81..13a11bb 100644
77856 --- a/include/linux/poison.h
77857 +++ b/include/linux/poison.h
77858 @@ -19,8 +19,8 @@
77859 * under normal circumstances, used to verify that nobody uses
77860 * non-initialized list entries.
77861 */
77862 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
77863 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
77864 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
77865 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
77866
77867 /********** include/linux/timer.h **********/
77868 /*
77869 diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
77870 index d8b187c3..9a9257a 100644
77871 --- a/include/linux/power/smartreflex.h
77872 +++ b/include/linux/power/smartreflex.h
77873 @@ -238,7 +238,7 @@ struct omap_sr_class_data {
77874 int (*notify)(struct omap_sr *sr, u32 status);
77875 u8 notify_flags;
77876 u8 class_type;
77877 -};
77878 +} __do_const;
77879
77880 /**
77881 * struct omap_sr_nvalue_table - Smartreflex n-target value info
77882 diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
77883 index 4ea1d37..80f4b33 100644
77884 --- a/include/linux/ppp-comp.h
77885 +++ b/include/linux/ppp-comp.h
77886 @@ -84,7 +84,7 @@ struct compressor {
77887 struct module *owner;
77888 /* Extra skb space needed by the compressor algorithm */
77889 unsigned int comp_extra;
77890 -};
77891 +} __do_const;
77892
77893 /*
77894 * The return value from decompress routine is the length of the
77895 diff --git a/include/linux/preempt.h b/include/linux/preempt.h
77896 index f5d4723..a6ea2fa 100644
77897 --- a/include/linux/preempt.h
77898 +++ b/include/linux/preempt.h
77899 @@ -18,8 +18,13 @@
77900 # define sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
77901 #endif
77902
77903 +#define raw_add_preempt_count(val) do { preempt_count() += (val); } while (0)
77904 +#define raw_sub_preempt_count(val) do { preempt_count() -= (val); } while (0)
77905 +
77906 #define inc_preempt_count() add_preempt_count(1)
77907 +#define raw_inc_preempt_count() raw_add_preempt_count(1)
77908 #define dec_preempt_count() sub_preempt_count(1)
77909 +#define raw_dec_preempt_count() raw_sub_preempt_count(1)
77910
77911 #define preempt_count() (current_thread_info()->preempt_count)
77912
77913 @@ -64,6 +69,12 @@ do { \
77914 barrier(); \
77915 } while (0)
77916
77917 +#define raw_preempt_disable() \
77918 +do { \
77919 + raw_inc_preempt_count(); \
77920 + barrier(); \
77921 +} while (0)
77922 +
77923 #define sched_preempt_enable_no_resched() \
77924 do { \
77925 barrier(); \
77926 @@ -72,6 +83,12 @@ do { \
77927
77928 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
77929
77930 +#define raw_preempt_enable_no_resched() \
77931 +do { \
77932 + barrier(); \
77933 + raw_dec_preempt_count(); \
77934 +} while (0)
77935 +
77936 #define preempt_enable() \
77937 do { \
77938 preempt_enable_no_resched(); \
77939 @@ -116,8 +133,10 @@ do { \
77940 * region.
77941 */
77942 #define preempt_disable() barrier()
77943 +#define raw_preempt_disable() barrier()
77944 #define sched_preempt_enable_no_resched() barrier()
77945 #define preempt_enable_no_resched() barrier()
77946 +#define raw_preempt_enable_no_resched() barrier()
77947 #define preempt_enable() barrier()
77948
77949 #define preempt_disable_notrace() barrier()
77950 diff --git a/include/linux/printk.h b/include/linux/printk.h
77951 index e6131a78..8e9fb61 100644
77952 --- a/include/linux/printk.h
77953 +++ b/include/linux/printk.h
77954 @@ -106,6 +106,8 @@ static inline __printf(1, 2) __cold
77955 void early_printk(const char *s, ...) { }
77956 #endif
77957
77958 +extern int kptr_restrict;
77959 +
77960 #ifdef CONFIG_PRINTK
77961 asmlinkage __printf(5, 0)
77962 int vprintk_emit(int facility, int level,
77963 @@ -140,7 +142,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
77964
77965 extern int printk_delay_msec;
77966 extern int dmesg_restrict;
77967 -extern int kptr_restrict;
77968
77969 extern void wake_up_klogd(void);
77970
77971 diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
77972 index 608e60a..c26f864 100644
77973 --- a/include/linux/proc_fs.h
77974 +++ b/include/linux/proc_fs.h
77975 @@ -34,6 +34,19 @@ static inline struct proc_dir_entry *proc_create(
77976 return proc_create_data(name, mode, parent, proc_fops, NULL);
77977 }
77978
77979 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
77980 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
77981 +{
77982 +#ifdef CONFIG_GRKERNSEC_PROC_USER
77983 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
77984 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77985 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
77986 +#else
77987 + return proc_create_data(name, mode, parent, proc_fops, NULL);
77988 +#endif
77989 +}
77990 +
77991 +
77992 extern void proc_set_size(struct proc_dir_entry *, loff_t);
77993 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
77994 extern void *PDE_DATA(const struct inode *);
77995 diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
77996 index 34a1e10..03a6d03 100644
77997 --- a/include/linux/proc_ns.h
77998 +++ b/include/linux/proc_ns.h
77999 @@ -14,7 +14,7 @@ struct proc_ns_operations {
78000 void (*put)(void *ns);
78001 int (*install)(struct nsproxy *nsproxy, void *ns);
78002 unsigned int (*inum)(void *ns);
78003 -};
78004 +} __do_const;
78005
78006 struct proc_ns {
78007 void *ns;
78008 diff --git a/include/linux/quota.h b/include/linux/quota.h
78009 index cc7494a..1e27036 100644
78010 --- a/include/linux/quota.h
78011 +++ b/include/linux/quota.h
78012 @@ -70,7 +70,7 @@ struct kqid { /* Type in which we store the quota identifier */
78013
78014 extern bool qid_eq(struct kqid left, struct kqid right);
78015 extern bool qid_lt(struct kqid left, struct kqid right);
78016 -extern qid_t from_kqid(struct user_namespace *to, struct kqid qid);
78017 +extern qid_t from_kqid(struct user_namespace *to, struct kqid qid) __intentional_overflow(-1);
78018 extern qid_t from_kqid_munged(struct user_namespace *to, struct kqid qid);
78019 extern bool qid_valid(struct kqid qid);
78020
78021 diff --git a/include/linux/random.h b/include/linux/random.h
78022 index bf9085e..1e8bbcf 100644
78023 --- a/include/linux/random.h
78024 +++ b/include/linux/random.h
78025 @@ -10,9 +10,19 @@
78026
78027
78028 extern void add_device_randomness(const void *, unsigned int);
78029 +
78030 +static inline void add_latent_entropy(void)
78031 +{
78032 +
78033 +#ifdef LATENT_ENTROPY_PLUGIN
78034 + add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
78035 +#endif
78036 +
78037 +}
78038 +
78039 extern void add_input_randomness(unsigned int type, unsigned int code,
78040 - unsigned int value);
78041 -extern void add_interrupt_randomness(int irq, int irq_flags);
78042 + unsigned int value) __latent_entropy;
78043 +extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
78044
78045 extern void get_random_bytes(void *buf, int nbytes);
78046 extern void get_random_bytes_arch(void *buf, int nbytes);
78047 @@ -23,16 +33,21 @@ extern int random_int_secret_init(void);
78048 extern const struct file_operations random_fops, urandom_fops;
78049 #endif
78050
78051 -unsigned int get_random_int(void);
78052 +unsigned int __intentional_overflow(-1) get_random_int(void);
78053 unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
78054
78055 -u32 prandom_u32(void);
78056 +u32 prandom_u32(void) __intentional_overflow(-1);
78057 void prandom_bytes(void *buf, int nbytes);
78058 void prandom_seed(u32 seed);
78059
78060 u32 prandom_u32_state(struct rnd_state *);
78061 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
78062
78063 +static inline unsigned long __intentional_overflow(-1) pax_get_random_long(void)
78064 +{
78065 + return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
78066 +}
78067 +
78068 /*
78069 * Handle minimum values for seeds
78070 */
78071 diff --git a/include/linux/rculist.h b/include/linux/rculist.h
78072 index 4106721..132d42c 100644
78073 --- a/include/linux/rculist.h
78074 +++ b/include/linux/rculist.h
78075 @@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
78076 struct list_head *prev, struct list_head *next);
78077 #endif
78078
78079 +extern void __pax_list_add_rcu(struct list_head *new,
78080 + struct list_head *prev, struct list_head *next);
78081 +
78082 /**
78083 * list_add_rcu - add a new entry to rcu-protected list
78084 * @new: new entry to be added
78085 @@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
78086 __list_add_rcu(new, head, head->next);
78087 }
78088
78089 +static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
78090 +{
78091 + __pax_list_add_rcu(new, head, head->next);
78092 +}
78093 +
78094 /**
78095 * list_add_tail_rcu - add a new entry to rcu-protected list
78096 * @new: new entry to be added
78097 @@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
78098 __list_add_rcu(new, head->prev, head);
78099 }
78100
78101 +static inline void pax_list_add_tail_rcu(struct list_head *new,
78102 + struct list_head *head)
78103 +{
78104 + __pax_list_add_rcu(new, head->prev, head);
78105 +}
78106 +
78107 /**
78108 * list_del_rcu - deletes entry from list without re-initialization
78109 * @entry: the element to delete from the list.
78110 @@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
78111 entry->prev = LIST_POISON2;
78112 }
78113
78114 +extern void pax_list_del_rcu(struct list_head *entry);
78115 +
78116 /**
78117 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
78118 * @n: the element to delete from the hash list.
78119 diff --git a/include/linux/reboot.h b/include/linux/reboot.h
78120 index 8e00f9f..9449b55 100644
78121 --- a/include/linux/reboot.h
78122 +++ b/include/linux/reboot.h
78123 @@ -43,9 +43,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
78124 * Architecture-specific implementations of sys_reboot commands.
78125 */
78126
78127 -extern void machine_restart(char *cmd);
78128 -extern void machine_halt(void);
78129 -extern void machine_power_off(void);
78130 +extern void machine_restart(char *cmd) __noreturn;
78131 +extern void machine_halt(void) __noreturn;
78132 +extern void machine_power_off(void) __noreturn;
78133
78134 extern void machine_shutdown(void);
78135 struct pt_regs;
78136 @@ -56,9 +56,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
78137 */
78138
78139 extern void kernel_restart_prepare(char *cmd);
78140 -extern void kernel_restart(char *cmd);
78141 -extern void kernel_halt(void);
78142 -extern void kernel_power_off(void);
78143 +extern void kernel_restart(char *cmd) __noreturn;
78144 +extern void kernel_halt(void) __noreturn;
78145 +extern void kernel_power_off(void) __noreturn;
78146
78147 extern int C_A_D; /* for sysctl */
78148 void ctrl_alt_del(void);
78149 @@ -72,7 +72,7 @@ extern int orderly_poweroff(bool force);
78150 * Emergency restart, callable from an interrupt handler.
78151 */
78152
78153 -extern void emergency_restart(void);
78154 +extern void emergency_restart(void) __noreturn;
78155 #include <asm/emergency-restart.h>
78156
78157 #endif /* _LINUX_REBOOT_H */
78158 diff --git a/include/linux/regset.h b/include/linux/regset.h
78159 index 8e0c9fe..ac4d221 100644
78160 --- a/include/linux/regset.h
78161 +++ b/include/linux/regset.h
78162 @@ -161,7 +161,8 @@ struct user_regset {
78163 unsigned int align;
78164 unsigned int bias;
78165 unsigned int core_note_type;
78166 -};
78167 +} __do_const;
78168 +typedef struct user_regset __no_const user_regset_no_const;
78169
78170 /**
78171 * struct user_regset_view - available regsets
78172 diff --git a/include/linux/relay.h b/include/linux/relay.h
78173 index d7c8359..818daf5 100644
78174 --- a/include/linux/relay.h
78175 +++ b/include/linux/relay.h
78176 @@ -157,7 +157,7 @@ struct rchan_callbacks
78177 * The callback should return 0 if successful, negative if not.
78178 */
78179 int (*remove_buf_file)(struct dentry *dentry);
78180 -};
78181 +} __no_const;
78182
78183 /*
78184 * CONFIG_RELAY kernel API, kernel/relay.c
78185 diff --git a/include/linux/rio.h b/include/linux/rio.h
78186 index b71d573..2f940bd 100644
78187 --- a/include/linux/rio.h
78188 +++ b/include/linux/rio.h
78189 @@ -355,7 +355,7 @@ struct rio_ops {
78190 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
78191 u64 rstart, u32 size, u32 flags);
78192 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
78193 -};
78194 +} __no_const;
78195
78196 #define RIO_RESOURCE_MEM 0x00000100
78197 #define RIO_RESOURCE_DOORBELL 0x00000200
78198 diff --git a/include/linux/rmap.h b/include/linux/rmap.h
78199 index 6dacb93..6174423 100644
78200 --- a/include/linux/rmap.h
78201 +++ b/include/linux/rmap.h
78202 @@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
78203 void anon_vma_init(void); /* create anon_vma_cachep */
78204 int anon_vma_prepare(struct vm_area_struct *);
78205 void unlink_anon_vmas(struct vm_area_struct *);
78206 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
78207 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
78208 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
78209 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
78210
78211 static inline void anon_vma_merge(struct vm_area_struct *vma,
78212 struct vm_area_struct *next)
78213 diff --git a/include/linux/sched.h b/include/linux/sched.h
78214 index b1e963e..114b8fd 100644
78215 --- a/include/linux/sched.h
78216 +++ b/include/linux/sched.h
78217 @@ -62,6 +62,7 @@ struct bio_list;
78218 struct fs_struct;
78219 struct perf_event_context;
78220 struct blk_plug;
78221 +struct linux_binprm;
78222
78223 /*
78224 * List of flags we want to share for kernel threads,
78225 @@ -295,7 +296,7 @@ extern char __sched_text_start[], __sched_text_end[];
78226 extern int in_sched_functions(unsigned long addr);
78227
78228 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
78229 -extern signed long schedule_timeout(signed long timeout);
78230 +extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
78231 extern signed long schedule_timeout_interruptible(signed long timeout);
78232 extern signed long schedule_timeout_killable(signed long timeout);
78233 extern signed long schedule_timeout_uninterruptible(signed long timeout);
78234 @@ -306,6 +307,19 @@ struct nsproxy;
78235 struct user_namespace;
78236
78237 #ifdef CONFIG_MMU
78238 +
78239 +#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
78240 +extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
78241 +#else
78242 +static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
78243 +{
78244 + return 0;
78245 +}
78246 +#endif
78247 +
78248 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
78249 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
78250 +
78251 extern void arch_pick_mmap_layout(struct mm_struct *mm);
78252 extern unsigned long
78253 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
78254 @@ -585,6 +599,17 @@ struct signal_struct {
78255 #ifdef CONFIG_TASKSTATS
78256 struct taskstats *stats;
78257 #endif
78258 +
78259 +#ifdef CONFIG_GRKERNSEC
78260 + u32 curr_ip;
78261 + u32 saved_ip;
78262 + u32 gr_saddr;
78263 + u32 gr_daddr;
78264 + u16 gr_sport;
78265 + u16 gr_dport;
78266 + u8 used_accept:1;
78267 +#endif
78268 +
78269 #ifdef CONFIG_AUDIT
78270 unsigned audit_tty;
78271 unsigned audit_tty_log_passwd;
78272 @@ -665,6 +690,14 @@ struct user_struct {
78273 struct key *session_keyring; /* UID's default session keyring */
78274 #endif
78275
78276 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
78277 + unsigned char kernel_banned;
78278 +#endif
78279 +#ifdef CONFIG_GRKERNSEC_BRUTE
78280 + unsigned char suid_banned;
78281 + unsigned long suid_ban_expires;
78282 +#endif
78283 +
78284 /* Hash table maintenance information */
78285 struct hlist_node uidhash_node;
78286 kuid_t uid;
78287 @@ -1150,8 +1183,8 @@ struct task_struct {
78288 struct list_head thread_group;
78289
78290 struct completion *vfork_done; /* for vfork() */
78291 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
78292 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
78293 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
78294 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
78295
78296 cputime_t utime, stime, utimescaled, stimescaled;
78297 cputime_t gtime;
78298 @@ -1176,11 +1209,6 @@ struct task_struct {
78299 struct task_cputime cputime_expires;
78300 struct list_head cpu_timers[3];
78301
78302 -/* process credentials */
78303 - const struct cred __rcu *real_cred; /* objective and real subjective task
78304 - * credentials (COW) */
78305 - const struct cred __rcu *cred; /* effective (overridable) subjective task
78306 - * credentials (COW) */
78307 char comm[TASK_COMM_LEN]; /* executable name excluding path
78308 - access with [gs]et_task_comm (which lock
78309 it with task_lock())
78310 @@ -1197,6 +1225,10 @@ struct task_struct {
78311 #endif
78312 /* CPU-specific state of this task */
78313 struct thread_struct thread;
78314 +/* thread_info moved to task_struct */
78315 +#ifdef CONFIG_X86
78316 + struct thread_info tinfo;
78317 +#endif
78318 /* filesystem information */
78319 struct fs_struct *fs;
78320 /* open file information */
78321 @@ -1270,6 +1302,10 @@ struct task_struct {
78322 gfp_t lockdep_reclaim_gfp;
78323 #endif
78324
78325 +/* process credentials */
78326 + const struct cred __rcu *real_cred; /* objective and real subjective task
78327 + * credentials (COW) */
78328 +
78329 /* journalling filesystem info */
78330 void *journal_info;
78331
78332 @@ -1308,6 +1344,10 @@ struct task_struct {
78333 /* cg_list protected by css_set_lock and tsk->alloc_lock */
78334 struct list_head cg_list;
78335 #endif
78336 +
78337 + const struct cred __rcu *cred; /* effective (overridable) subjective task
78338 + * credentials (COW) */
78339 +
78340 #ifdef CONFIG_FUTEX
78341 struct robust_list_head __user *robust_list;
78342 #ifdef CONFIG_COMPAT
78343 @@ -1411,8 +1451,78 @@ struct task_struct {
78344 unsigned int sequential_io;
78345 unsigned int sequential_io_avg;
78346 #endif
78347 +
78348 +#ifdef CONFIG_GRKERNSEC
78349 + /* grsecurity */
78350 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78351 + u64 exec_id;
78352 +#endif
78353 +#ifdef CONFIG_GRKERNSEC_SETXID
78354 + const struct cred *delayed_cred;
78355 +#endif
78356 + struct dentry *gr_chroot_dentry;
78357 + struct acl_subject_label *acl;
78358 + struct acl_subject_label *tmpacl;
78359 + struct acl_role_label *role;
78360 + struct file *exec_file;
78361 + unsigned long brute_expires;
78362 + u16 acl_role_id;
78363 + u8 inherited;
78364 + /* is this the task that authenticated to the special role */
78365 + u8 acl_sp_role;
78366 + u8 is_writable;
78367 + u8 brute;
78368 + u8 gr_is_chrooted;
78369 +#endif
78370 +
78371 };
78372
78373 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
78374 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
78375 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
78376 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
78377 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
78378 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
78379 +
78380 +#ifdef CONFIG_PAX_SOFTMODE
78381 +extern int pax_softmode;
78382 +#endif
78383 +
78384 +extern int pax_check_flags(unsigned long *);
78385 +
78386 +/* if tsk != current then task_lock must be held on it */
78387 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
78388 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
78389 +{
78390 + if (likely(tsk->mm))
78391 + return tsk->mm->pax_flags;
78392 + else
78393 + return 0UL;
78394 +}
78395 +
78396 +/* if tsk != current then task_lock must be held on it */
78397 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
78398 +{
78399 + if (likely(tsk->mm)) {
78400 + tsk->mm->pax_flags = flags;
78401 + return 0;
78402 + }
78403 + return -EINVAL;
78404 +}
78405 +#endif
78406 +
78407 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
78408 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
78409 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
78410 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
78411 +#endif
78412 +
78413 +struct path;
78414 +extern char *pax_get_path(const struct path *path, char *buf, int buflen);
78415 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
78416 +extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
78417 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
78418 +
78419 /* Future-safe accessor for struct task_struct's cpus_allowed. */
78420 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
78421
78422 @@ -1471,7 +1581,7 @@ struct pid_namespace;
78423 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
78424 struct pid_namespace *ns);
78425
78426 -static inline pid_t task_pid_nr(struct task_struct *tsk)
78427 +static inline pid_t task_pid_nr(const struct task_struct *tsk)
78428 {
78429 return tsk->pid;
78430 }
78431 @@ -1921,7 +2031,9 @@ void yield(void);
78432 extern struct exec_domain default_exec_domain;
78433
78434 union thread_union {
78435 +#ifndef CONFIG_X86
78436 struct thread_info thread_info;
78437 +#endif
78438 unsigned long stack[THREAD_SIZE/sizeof(long)];
78439 };
78440
78441 @@ -1954,6 +2066,7 @@ extern struct pid_namespace init_pid_ns;
78442 */
78443
78444 extern struct task_struct *find_task_by_vpid(pid_t nr);
78445 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
78446 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
78447 struct pid_namespace *ns);
78448
78449 @@ -2118,7 +2231,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
78450 extern void exit_itimers(struct signal_struct *);
78451 extern void flush_itimer_signals(void);
78452
78453 -extern void do_group_exit(int);
78454 +extern __noreturn void do_group_exit(int);
78455
78456 extern int allow_signal(int);
78457 extern int disallow_signal(int);
78458 @@ -2309,9 +2422,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
78459
78460 #endif
78461
78462 -static inline int object_is_on_stack(void *obj)
78463 +static inline int object_starts_on_stack(void *obj)
78464 {
78465 - void *stack = task_stack_page(current);
78466 + const void *stack = task_stack_page(current);
78467
78468 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
78469 }
78470 diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
78471 index bf8086b..962b035 100644
78472 --- a/include/linux/sched/sysctl.h
78473 +++ b/include/linux/sched/sysctl.h
78474 @@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
78475 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
78476
78477 extern int sysctl_max_map_count;
78478 +extern unsigned long sysctl_heap_stack_gap;
78479
78480 extern unsigned int sysctl_sched_latency;
78481 extern unsigned int sysctl_sched_min_granularity;
78482 diff --git a/include/linux/security.h b/include/linux/security.h
78483 index 9d37e2b..43368e4 100644
78484 --- a/include/linux/security.h
78485 +++ b/include/linux/security.h
78486 @@ -27,6 +27,7 @@
78487 #include <linux/slab.h>
78488 #include <linux/err.h>
78489 #include <linux/string.h>
78490 +#include <linux/grsecurity.h>
78491
78492 struct linux_binprm;
78493 struct cred;
78494 @@ -116,8 +117,6 @@ struct seq_file;
78495
78496 extern int cap_netlink_send(struct sock *sk, struct sk_buff *skb);
78497
78498 -void reset_security_ops(void);
78499 -
78500 #ifdef CONFIG_MMU
78501 extern unsigned long mmap_min_addr;
78502 extern unsigned long dac_mmap_min_addr;
78503 diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
78504 index dc368b8..e895209 100644
78505 --- a/include/linux/semaphore.h
78506 +++ b/include/linux/semaphore.h
78507 @@ -37,7 +37,7 @@ static inline void sema_init(struct semaphore *sem, int val)
78508 }
78509
78510 extern void down(struct semaphore *sem);
78511 -extern int __must_check down_interruptible(struct semaphore *sem);
78512 +extern int __must_check down_interruptible(struct semaphore *sem) __intentional_overflow(-1);
78513 extern int __must_check down_killable(struct semaphore *sem);
78514 extern int __must_check down_trylock(struct semaphore *sem);
78515 extern int __must_check down_timeout(struct semaphore *sem, long jiffies);
78516 diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
78517 index 4e32edc..f8f2d18 100644
78518 --- a/include/linux/seq_file.h
78519 +++ b/include/linux/seq_file.h
78520 @@ -26,6 +26,9 @@ struct seq_file {
78521 struct mutex lock;
78522 const struct seq_operations *op;
78523 int poll_event;
78524 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
78525 + u64 exec_id;
78526 +#endif
78527 #ifdef CONFIG_USER_NS
78528 struct user_namespace *user_ns;
78529 #endif
78530 @@ -38,6 +41,7 @@ struct seq_operations {
78531 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
78532 int (*show) (struct seq_file *m, void *v);
78533 };
78534 +typedef struct seq_operations __no_const seq_operations_no_const;
78535
78536 #define SEQ_SKIP 1
78537
78538 diff --git a/include/linux/shm.h b/include/linux/shm.h
78539 index 429c199..4d42e38 100644
78540 --- a/include/linux/shm.h
78541 +++ b/include/linux/shm.h
78542 @@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
78543
78544 /* The task created the shm object. NULL if the task is dead. */
78545 struct task_struct *shm_creator;
78546 +#ifdef CONFIG_GRKERNSEC
78547 + time_t shm_createtime;
78548 + pid_t shm_lapid;
78549 +#endif
78550 };
78551
78552 /* shm_mode upper byte flags */
78553 diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
78554 index f66f346..2e304d5 100644
78555 --- a/include/linux/skbuff.h
78556 +++ b/include/linux/skbuff.h
78557 @@ -639,7 +639,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
78558 extern struct sk_buff *__alloc_skb(unsigned int size,
78559 gfp_t priority, int flags, int node);
78560 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
78561 -static inline struct sk_buff *alloc_skb(unsigned int size,
78562 +static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
78563 gfp_t priority)
78564 {
78565 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
78566 @@ -755,7 +755,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
78567 */
78568 static inline int skb_queue_empty(const struct sk_buff_head *list)
78569 {
78570 - return list->next == (struct sk_buff *)list;
78571 + return list->next == (const struct sk_buff *)list;
78572 }
78573
78574 /**
78575 @@ -768,7 +768,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
78576 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
78577 const struct sk_buff *skb)
78578 {
78579 - return skb->next == (struct sk_buff *)list;
78580 + return skb->next == (const struct sk_buff *)list;
78581 }
78582
78583 /**
78584 @@ -781,7 +781,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
78585 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
78586 const struct sk_buff *skb)
78587 {
78588 - return skb->prev == (struct sk_buff *)list;
78589 + return skb->prev == (const struct sk_buff *)list;
78590 }
78591
78592 /**
78593 @@ -1741,7 +1741,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
78594 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
78595 */
78596 #ifndef NET_SKB_PAD
78597 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
78598 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
78599 #endif
78600
78601 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
78602 @@ -2339,7 +2339,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
78603 int noblock, int *err);
78604 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
78605 struct poll_table_struct *wait);
78606 -extern int skb_copy_datagram_iovec(const struct sk_buff *from,
78607 +extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
78608 int offset, struct iovec *to,
78609 int size);
78610 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
78611 @@ -2618,6 +2618,9 @@ static inline void nf_reset(struct sk_buff *skb)
78612 nf_bridge_put(skb->nf_bridge);
78613 skb->nf_bridge = NULL;
78614 #endif
78615 +#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
78616 + skb->nf_trace = 0;
78617 +#endif
78618 }
78619
78620 static inline void nf_reset_trace(struct sk_buff *skb)
78621 diff --git a/include/linux/slab.h b/include/linux/slab.h
78622 index 74f1058..914b7da 100644
78623 --- a/include/linux/slab.h
78624 +++ b/include/linux/slab.h
78625 @@ -14,15 +14,29 @@
78626 #include <linux/gfp.h>
78627 #include <linux/types.h>
78628 #include <linux/workqueue.h>
78629 -
78630 +#include <linux/err.h>
78631
78632 /*
78633 * Flags to pass to kmem_cache_create().
78634 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
78635 */
78636 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
78637 +
78638 +#ifdef CONFIG_PAX_USERCOPY_SLABS
78639 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
78640 +#else
78641 +#define SLAB_USERCOPY 0x00000000UL
78642 +#endif
78643 +
78644 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
78645 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
78646 +
78647 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
78648 +#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
78649 +#else
78650 +#define SLAB_NO_SANITIZE 0x00000000UL
78651 +#endif
78652 +
78653 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
78654 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
78655 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
78656 @@ -91,10 +105,13 @@
78657 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
78658 * Both make kfree a no-op.
78659 */
78660 -#define ZERO_SIZE_PTR ((void *)16)
78661 +#define ZERO_SIZE_PTR \
78662 +({ \
78663 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
78664 + (void *)(-MAX_ERRNO-1L); \
78665 +})
78666
78667 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
78668 - (unsigned long)ZERO_SIZE_PTR)
78669 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
78670
78671 #include <linux/kmemleak.h>
78672
78673 @@ -135,6 +152,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
78674 void kfree(const void *);
78675 void kzfree(const void *);
78676 size_t ksize(const void *);
78677 +const char *check_heap_object(const void *ptr, unsigned long n);
78678 +bool is_usercopy_object(const void *ptr);
78679
78680 /*
78681 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
78682 @@ -167,7 +186,7 @@ struct kmem_cache {
78683 unsigned int align; /* Alignment as calculated */
78684 unsigned long flags; /* Active flags on the slab */
78685 const char *name; /* Slab name for sysfs */
78686 - int refcount; /* Use counter */
78687 + atomic_t refcount; /* Use counter */
78688 void (*ctor)(void *); /* Called on object slot creation */
78689 struct list_head list; /* List of all slab caches on the system */
78690 };
78691 @@ -241,6 +260,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
78692 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
78693 #endif
78694
78695 +#ifdef CONFIG_PAX_USERCOPY_SLABS
78696 +extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
78697 +#endif
78698 +
78699 /*
78700 * Figure out which kmalloc slab an allocation of a certain size
78701 * belongs to.
78702 @@ -249,7 +272,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
78703 * 2 = 120 .. 192 bytes
78704 * n = 2^(n-1) .. 2^n -1
78705 */
78706 -static __always_inline int kmalloc_index(size_t size)
78707 +static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
78708 {
78709 if (!size)
78710 return 0;
78711 @@ -292,11 +315,11 @@ static __always_inline int kmalloc_index(size_t size)
78712 }
78713 #endif /* !CONFIG_SLOB */
78714
78715 -void *__kmalloc(size_t size, gfp_t flags);
78716 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
78717 void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags);
78718
78719 #ifdef CONFIG_NUMA
78720 -void *__kmalloc_node(size_t size, gfp_t flags, int node);
78721 +void *__kmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
78722 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
78723 #else
78724 static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
78725 diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
78726 index e9346b4..1494959 100644
78727 --- a/include/linux/slab_def.h
78728 +++ b/include/linux/slab_def.h
78729 @@ -36,7 +36,7 @@ struct kmem_cache {
78730 /* 4) cache creation/removal */
78731 const char *name;
78732 struct list_head list;
78733 - int refcount;
78734 + atomic_t refcount;
78735 int object_size;
78736 int align;
78737
78738 @@ -52,10 +52,14 @@ struct kmem_cache {
78739 unsigned long node_allocs;
78740 unsigned long node_frees;
78741 unsigned long node_overflow;
78742 - atomic_t allochit;
78743 - atomic_t allocmiss;
78744 - atomic_t freehit;
78745 - atomic_t freemiss;
78746 + atomic_unchecked_t allochit;
78747 + atomic_unchecked_t allocmiss;
78748 + atomic_unchecked_t freehit;
78749 + atomic_unchecked_t freemiss;
78750 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
78751 + atomic_unchecked_t sanitized;
78752 + atomic_unchecked_t not_sanitized;
78753 +#endif
78754
78755 /*
78756 * If debugging is enabled, then the allocator can add additional
78757 diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
78758 index cc0b67e..a0329b1 100644
78759 --- a/include/linux/slub_def.h
78760 +++ b/include/linux/slub_def.h
78761 @@ -74,7 +74,7 @@ struct kmem_cache {
78762 struct kmem_cache_order_objects max;
78763 struct kmem_cache_order_objects min;
78764 gfp_t allocflags; /* gfp flags to use on each alloc */
78765 - int refcount; /* Refcount for slab cache destroy */
78766 + atomic_t refcount; /* Refcount for slab cache destroy */
78767 void (*ctor)(void *);
78768 int inuse; /* Offset to metadata */
78769 int align; /* Alignment */
78770 diff --git a/include/linux/smp.h b/include/linux/smp.h
78771 index 731f523..3340268 100644
78772 --- a/include/linux/smp.h
78773 +++ b/include/linux/smp.h
78774 @@ -186,7 +186,9 @@ static inline void __smp_call_function_single(int cpuid,
78775 #endif
78776
78777 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
78778 +#define raw_get_cpu() ({ raw_preempt_disable(); raw_smp_processor_id(); })
78779 #define put_cpu() preempt_enable()
78780 +#define raw_put_cpu_no_resched() raw_preempt_enable_no_resched()
78781
78782 /*
78783 * Callback to arch code if there's nosmp or maxcpus=0 on the
78784 diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
78785 index 54f91d3..be2c379 100644
78786 --- a/include/linux/sock_diag.h
78787 +++ b/include/linux/sock_diag.h
78788 @@ -11,7 +11,7 @@ struct sock;
78789 struct sock_diag_handler {
78790 __u8 family;
78791 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
78792 -};
78793 +} __do_const;
78794
78795 int sock_diag_register(const struct sock_diag_handler *h);
78796 void sock_diag_unregister(const struct sock_diag_handler *h);
78797 diff --git a/include/linux/sonet.h b/include/linux/sonet.h
78798 index 680f9a3..f13aeb0 100644
78799 --- a/include/linux/sonet.h
78800 +++ b/include/linux/sonet.h
78801 @@ -7,7 +7,7 @@
78802 #include <uapi/linux/sonet.h>
78803
78804 struct k_sonet_stats {
78805 -#define __HANDLE_ITEM(i) atomic_t i
78806 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
78807 __SONET_ITEMS
78808 #undef __HANDLE_ITEM
78809 };
78810 diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
78811 index 07d8e53..dc934c9 100644
78812 --- a/include/linux/sunrpc/addr.h
78813 +++ b/include/linux/sunrpc/addr.h
78814 @@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
78815 {
78816 switch (sap->sa_family) {
78817 case AF_INET:
78818 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
78819 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
78820 case AF_INET6:
78821 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
78822 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
78823 }
78824 return 0;
78825 }
78826 @@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
78827 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
78828 const struct sockaddr *src)
78829 {
78830 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
78831 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
78832 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
78833
78834 dsin->sin_family = ssin->sin_family;
78835 @@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
78836 if (sa->sa_family != AF_INET6)
78837 return 0;
78838
78839 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
78840 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
78841 }
78842
78843 #endif /* _LINUX_SUNRPC_ADDR_H */
78844 diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
78845 index 6740801..c535f27 100644
78846 --- a/include/linux/sunrpc/clnt.h
78847 +++ b/include/linux/sunrpc/clnt.h
78848 @@ -96,7 +96,7 @@ struct rpc_procinfo {
78849 unsigned int p_timer; /* Which RTT timer to use */
78850 u32 p_statidx; /* Which procedure to account */
78851 const char * p_name; /* name of procedure */
78852 -};
78853 +} __do_const;
78854
78855 #ifdef __KERNEL__
78856
78857 diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
78858 index 6eecfc2..7ada79d 100644
78859 --- a/include/linux/sunrpc/svc.h
78860 +++ b/include/linux/sunrpc/svc.h
78861 @@ -410,7 +410,7 @@ struct svc_procedure {
78862 unsigned int pc_count; /* call count */
78863 unsigned int pc_cachetype; /* cache info (NFS) */
78864 unsigned int pc_xdrressize; /* maximum size of XDR reply */
78865 -};
78866 +} __do_const;
78867
78868 /*
78869 * Function prototypes.
78870 diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
78871 index 0b8e3e6..33e0a01 100644
78872 --- a/include/linux/sunrpc/svc_rdma.h
78873 +++ b/include/linux/sunrpc/svc_rdma.h
78874 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
78875 extern unsigned int svcrdma_max_requests;
78876 extern unsigned int svcrdma_max_req_size;
78877
78878 -extern atomic_t rdma_stat_recv;
78879 -extern atomic_t rdma_stat_read;
78880 -extern atomic_t rdma_stat_write;
78881 -extern atomic_t rdma_stat_sq_starve;
78882 -extern atomic_t rdma_stat_rq_starve;
78883 -extern atomic_t rdma_stat_rq_poll;
78884 -extern atomic_t rdma_stat_rq_prod;
78885 -extern atomic_t rdma_stat_sq_poll;
78886 -extern atomic_t rdma_stat_sq_prod;
78887 +extern atomic_unchecked_t rdma_stat_recv;
78888 +extern atomic_unchecked_t rdma_stat_read;
78889 +extern atomic_unchecked_t rdma_stat_write;
78890 +extern atomic_unchecked_t rdma_stat_sq_starve;
78891 +extern atomic_unchecked_t rdma_stat_rq_starve;
78892 +extern atomic_unchecked_t rdma_stat_rq_poll;
78893 +extern atomic_unchecked_t rdma_stat_rq_prod;
78894 +extern atomic_unchecked_t rdma_stat_sq_poll;
78895 +extern atomic_unchecked_t rdma_stat_sq_prod;
78896
78897 #define RPCRDMA_VERSION 1
78898
78899 diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
78900 index 8d71d65..f79586e 100644
78901 --- a/include/linux/sunrpc/svcauth.h
78902 +++ b/include/linux/sunrpc/svcauth.h
78903 @@ -120,7 +120,7 @@ struct auth_ops {
78904 int (*release)(struct svc_rqst *rq);
78905 void (*domain_release)(struct auth_domain *);
78906 int (*set_client)(struct svc_rqst *rq);
78907 -};
78908 +} __do_const;
78909
78910 #define SVC_GARBAGE 1
78911 #define SVC_SYSERR 2
78912 diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
78913 index a5ffd32..0935dea 100644
78914 --- a/include/linux/swiotlb.h
78915 +++ b/include/linux/swiotlb.h
78916 @@ -60,7 +60,8 @@ extern void
78917
78918 extern void
78919 swiotlb_free_coherent(struct device *hwdev, size_t size,
78920 - void *vaddr, dma_addr_t dma_handle);
78921 + void *vaddr, dma_addr_t dma_handle,
78922 + struct dma_attrs *attrs);
78923
78924 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
78925 unsigned long offset, size_t size,
78926 diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
78927 index 7fac04e..de57300 100644
78928 --- a/include/linux/syscalls.h
78929 +++ b/include/linux/syscalls.h
78930 @@ -97,8 +97,14 @@ struct sigaltstack;
78931 #define __MAP(n,...) __MAP##n(__VA_ARGS__)
78932
78933 #define __SC_DECL(t, a) t a
78934 +#define __TYPE_IS_U(t) (__same_type((t)0, 0UL) || __same_type((t)0, 0U) || __same_type((t)0, (unsigned short)0) || __same_type((t)0, (unsigned char)0))
78935 #define __TYPE_IS_LL(t) (__same_type((t)0, 0LL) || __same_type((t)0, 0ULL))
78936 -#define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
78937 +#define __SC_LONG(t, a) __typeof( \
78938 + __builtin_choose_expr( \
78939 + sizeof(t) > sizeof(int), \
78940 + (t) 0, \
78941 + __builtin_choose_expr(__TYPE_IS_U(t), 0UL, 0L) \
78942 + )) a
78943 #define __SC_CAST(t, a) (t) a
78944 #define __SC_ARGS(t, a) a
78945 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
78946 @@ -363,11 +369,11 @@ asmlinkage long sys_sync(void);
78947 asmlinkage long sys_fsync(unsigned int fd);
78948 asmlinkage long sys_fdatasync(unsigned int fd);
78949 asmlinkage long sys_bdflush(int func, long data);
78950 -asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
78951 - char __user *type, unsigned long flags,
78952 +asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
78953 + const char __user *type, unsigned long flags,
78954 void __user *data);
78955 -asmlinkage long sys_umount(char __user *name, int flags);
78956 -asmlinkage long sys_oldumount(char __user *name);
78957 +asmlinkage long sys_umount(const char __user *name, int flags);
78958 +asmlinkage long sys_oldumount(const char __user *name);
78959 asmlinkage long sys_truncate(const char __user *path, long length);
78960 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
78961 asmlinkage long sys_stat(const char __user *filename,
78962 @@ -579,7 +585,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
78963 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
78964 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
78965 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
78966 - struct sockaddr __user *, int);
78967 + struct sockaddr __user *, int) __intentional_overflow(0);
78968 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
78969 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
78970 unsigned int vlen, unsigned flags);
78971 diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
78972 index 27b3b0b..e093dd9 100644
78973 --- a/include/linux/syscore_ops.h
78974 +++ b/include/linux/syscore_ops.h
78975 @@ -16,7 +16,7 @@ struct syscore_ops {
78976 int (*suspend)(void);
78977 void (*resume)(void);
78978 void (*shutdown)(void);
78979 -};
78980 +} __do_const;
78981
78982 extern void register_syscore_ops(struct syscore_ops *ops);
78983 extern void unregister_syscore_ops(struct syscore_ops *ops);
78984 diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
78985 index 14a8ff2..af52bad 100644
78986 --- a/include/linux/sysctl.h
78987 +++ b/include/linux/sysctl.h
78988 @@ -34,13 +34,13 @@ struct ctl_table_root;
78989 struct ctl_table_header;
78990 struct ctl_dir;
78991
78992 -typedef struct ctl_table ctl_table;
78993 -
78994 typedef int proc_handler (struct ctl_table *ctl, int write,
78995 void __user *buffer, size_t *lenp, loff_t *ppos);
78996
78997 extern int proc_dostring(struct ctl_table *, int,
78998 void __user *, size_t *, loff_t *);
78999 +extern int proc_dostring_modpriv(struct ctl_table *, int,
79000 + void __user *, size_t *, loff_t *);
79001 extern int proc_dointvec(struct ctl_table *, int,
79002 void __user *, size_t *, loff_t *);
79003 extern int proc_dointvec_minmax(struct ctl_table *, int,
79004 @@ -115,7 +115,9 @@ struct ctl_table
79005 struct ctl_table_poll *poll;
79006 void *extra1;
79007 void *extra2;
79008 -};
79009 +} __do_const;
79010 +typedef struct ctl_table __no_const ctl_table_no_const;
79011 +typedef struct ctl_table ctl_table;
79012
79013 struct ctl_node {
79014 struct rb_node node;
79015 diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
79016 index 11baec7..706f99f 100644
79017 --- a/include/linux/sysfs.h
79018 +++ b/include/linux/sysfs.h
79019 @@ -33,7 +33,8 @@ struct attribute {
79020 struct lock_class_key *key;
79021 struct lock_class_key skey;
79022 #endif
79023 -};
79024 +} __do_const;
79025 +typedef struct attribute __no_const attribute_no_const;
79026
79027 /**
79028 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
79029 @@ -62,7 +63,8 @@ struct attribute_group {
79030 struct attribute *, int);
79031 struct attribute **attrs;
79032 struct bin_attribute **bin_attrs;
79033 -};
79034 +} __do_const;
79035 +typedef struct attribute_group __no_const attribute_group_no_const;
79036
79037 /**
79038 * Use these macros to make defining attributes easier. See include/linux/device.h
79039 @@ -126,7 +128,8 @@ struct bin_attribute {
79040 char *, loff_t, size_t);
79041 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
79042 struct vm_area_struct *vma);
79043 -};
79044 +} __do_const;
79045 +typedef struct bin_attribute __no_const bin_attribute_no_const;
79046
79047 /**
79048 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
79049 diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
79050 index 7faf933..9b85a0c 100644
79051 --- a/include/linux/sysrq.h
79052 +++ b/include/linux/sysrq.h
79053 @@ -16,6 +16,7 @@
79054
79055 #include <linux/errno.h>
79056 #include <linux/types.h>
79057 +#include <linux/compiler.h>
79058
79059 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
79060 #define SYSRQ_DEFAULT_ENABLE 1
79061 @@ -36,7 +37,7 @@ struct sysrq_key_op {
79062 char *help_msg;
79063 char *action_msg;
79064 int enable_mask;
79065 -};
79066 +} __do_const;
79067
79068 #ifdef CONFIG_MAGIC_SYSRQ
79069
79070 diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
79071 index 4ae6f32..425d3e1 100644
79072 --- a/include/linux/thread_info.h
79073 +++ b/include/linux/thread_info.h
79074 @@ -150,6 +150,15 @@ static inline bool test_and_clear_restore_sigmask(void)
79075 #error "no set_restore_sigmask() provided and default one won't work"
79076 #endif
79077
79078 +extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
79079 +static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
79080 +{
79081 +#ifndef CONFIG_PAX_USERCOPY_DEBUG
79082 + if (!__builtin_constant_p(n))
79083 +#endif
79084 + __check_object_size(ptr, n, to_user);
79085 +}
79086 +
79087 #endif /* __KERNEL__ */
79088
79089 #endif /* _LINUX_THREAD_INFO_H */
79090 diff --git a/include/linux/tty.h b/include/linux/tty.h
79091 index 64f8646..1515fc7 100644
79092 --- a/include/linux/tty.h
79093 +++ b/include/linux/tty.h
79094 @@ -197,7 +197,7 @@ struct tty_port {
79095 const struct tty_port_operations *ops; /* Port operations */
79096 spinlock_t lock; /* Lock protecting tty field */
79097 int blocked_open; /* Waiting to open */
79098 - int count; /* Usage count */
79099 + atomic_t count; /* Usage count */
79100 wait_queue_head_t open_wait; /* Open waiters */
79101 wait_queue_head_t close_wait; /* Close waiters */
79102 wait_queue_head_t delta_msr_wait; /* Modem status change */
79103 @@ -546,7 +546,7 @@ extern int tty_port_open(struct tty_port *port,
79104 struct tty_struct *tty, struct file *filp);
79105 static inline int tty_port_users(struct tty_port *port)
79106 {
79107 - return port->count + port->blocked_open;
79108 + return atomic_read(&port->count) + port->blocked_open;
79109 }
79110
79111 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
79112 diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
79113 index 756a609..b302dd6 100644
79114 --- a/include/linux/tty_driver.h
79115 +++ b/include/linux/tty_driver.h
79116 @@ -285,7 +285,7 @@ struct tty_operations {
79117 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
79118 #endif
79119 const struct file_operations *proc_fops;
79120 -};
79121 +} __do_const;
79122
79123 struct tty_driver {
79124 int magic; /* magic number for this structure */
79125 diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
79126 index f15c898..207b7d1 100644
79127 --- a/include/linux/tty_ldisc.h
79128 +++ b/include/linux/tty_ldisc.h
79129 @@ -211,7 +211,7 @@ struct tty_ldisc_ops {
79130
79131 struct module *owner;
79132
79133 - int refcount;
79134 + atomic_t refcount;
79135 };
79136
79137 struct tty_ldisc {
79138 diff --git a/include/linux/types.h b/include/linux/types.h
79139 index 4d118ba..c3ee9bf 100644
79140 --- a/include/linux/types.h
79141 +++ b/include/linux/types.h
79142 @@ -176,10 +176,26 @@ typedef struct {
79143 int counter;
79144 } atomic_t;
79145
79146 +#ifdef CONFIG_PAX_REFCOUNT
79147 +typedef struct {
79148 + int counter;
79149 +} atomic_unchecked_t;
79150 +#else
79151 +typedef atomic_t atomic_unchecked_t;
79152 +#endif
79153 +
79154 #ifdef CONFIG_64BIT
79155 typedef struct {
79156 long counter;
79157 } atomic64_t;
79158 +
79159 +#ifdef CONFIG_PAX_REFCOUNT
79160 +typedef struct {
79161 + long counter;
79162 +} atomic64_unchecked_t;
79163 +#else
79164 +typedef atomic64_t atomic64_unchecked_t;
79165 +#endif
79166 #endif
79167
79168 struct list_head {
79169 diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
79170 index 5ca0951..ab496a5 100644
79171 --- a/include/linux/uaccess.h
79172 +++ b/include/linux/uaccess.h
79173 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
79174 long ret; \
79175 mm_segment_t old_fs = get_fs(); \
79176 \
79177 - set_fs(KERNEL_DS); \
79178 pagefault_disable(); \
79179 - ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
79180 - pagefault_enable(); \
79181 + set_fs(KERNEL_DS); \
79182 + ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
79183 set_fs(old_fs); \
79184 + pagefault_enable(); \
79185 ret; \
79186 })
79187
79188 diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
79189 index 8e522cbc..aa8572d 100644
79190 --- a/include/linux/uidgid.h
79191 +++ b/include/linux/uidgid.h
79192 @@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
79193
79194 #endif /* CONFIG_USER_NS */
79195
79196 +#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
79197 +#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
79198 +#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
79199 +#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
79200 +
79201 #endif /* _LINUX_UIDGID_H */
79202 diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
79203 index 99c1b4d..562e6f3 100644
79204 --- a/include/linux/unaligned/access_ok.h
79205 +++ b/include/linux/unaligned/access_ok.h
79206 @@ -4,34 +4,34 @@
79207 #include <linux/kernel.h>
79208 #include <asm/byteorder.h>
79209
79210 -static inline u16 get_unaligned_le16(const void *p)
79211 +static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
79212 {
79213 - return le16_to_cpup((__le16 *)p);
79214 + return le16_to_cpup((const __le16 *)p);
79215 }
79216
79217 -static inline u32 get_unaligned_le32(const void *p)
79218 +static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
79219 {
79220 - return le32_to_cpup((__le32 *)p);
79221 + return le32_to_cpup((const __le32 *)p);
79222 }
79223
79224 -static inline u64 get_unaligned_le64(const void *p)
79225 +static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
79226 {
79227 - return le64_to_cpup((__le64 *)p);
79228 + return le64_to_cpup((const __le64 *)p);
79229 }
79230
79231 -static inline u16 get_unaligned_be16(const void *p)
79232 +static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
79233 {
79234 - return be16_to_cpup((__be16 *)p);
79235 + return be16_to_cpup((const __be16 *)p);
79236 }
79237
79238 -static inline u32 get_unaligned_be32(const void *p)
79239 +static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
79240 {
79241 - return be32_to_cpup((__be32 *)p);
79242 + return be32_to_cpup((const __be32 *)p);
79243 }
79244
79245 -static inline u64 get_unaligned_be64(const void *p)
79246 +static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
79247 {
79248 - return be64_to_cpup((__be64 *)p);
79249 + return be64_to_cpup((const __be64 *)p);
79250 }
79251
79252 static inline void put_unaligned_le16(u16 val, void *p)
79253 diff --git a/include/linux/usb.h b/include/linux/usb.h
79254 index 6b02370..2355ffa 100644
79255 --- a/include/linux/usb.h
79256 +++ b/include/linux/usb.h
79257 @@ -563,7 +563,7 @@ struct usb_device {
79258 int maxchild;
79259
79260 u32 quirks;
79261 - atomic_t urbnum;
79262 + atomic_unchecked_t urbnum;
79263
79264 unsigned long active_duration;
79265
79266 @@ -1639,7 +1639,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
79267
79268 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
79269 __u8 request, __u8 requesttype, __u16 value, __u16 index,
79270 - void *data, __u16 size, int timeout);
79271 + void *data, __u16 size, int timeout) __intentional_overflow(-1);
79272 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
79273 void *data, int len, int *actual_length, int timeout);
79274 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
79275 diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
79276 index e452ba6..78f8e80 100644
79277 --- a/include/linux/usb/renesas_usbhs.h
79278 +++ b/include/linux/usb/renesas_usbhs.h
79279 @@ -39,7 +39,7 @@ enum {
79280 */
79281 struct renesas_usbhs_driver_callback {
79282 int (*notify_hotplug)(struct platform_device *pdev);
79283 -};
79284 +} __no_const;
79285
79286 /*
79287 * callback functions for platform
79288 diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
79289 index 6f8fbcf..8259001 100644
79290 --- a/include/linux/vermagic.h
79291 +++ b/include/linux/vermagic.h
79292 @@ -25,9 +25,35 @@
79293 #define MODULE_ARCH_VERMAGIC ""
79294 #endif
79295
79296 +#ifdef CONFIG_PAX_REFCOUNT
79297 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
79298 +#else
79299 +#define MODULE_PAX_REFCOUNT ""
79300 +#endif
79301 +
79302 +#ifdef CONSTIFY_PLUGIN
79303 +#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
79304 +#else
79305 +#define MODULE_CONSTIFY_PLUGIN ""
79306 +#endif
79307 +
79308 +#ifdef STACKLEAK_PLUGIN
79309 +#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
79310 +#else
79311 +#define MODULE_STACKLEAK_PLUGIN ""
79312 +#endif
79313 +
79314 +#ifdef CONFIG_GRKERNSEC
79315 +#define MODULE_GRSEC "GRSEC "
79316 +#else
79317 +#define MODULE_GRSEC ""
79318 +#endif
79319 +
79320 #define VERMAGIC_STRING \
79321 UTS_RELEASE " " \
79322 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
79323 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
79324 - MODULE_ARCH_VERMAGIC
79325 + MODULE_ARCH_VERMAGIC \
79326 + MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
79327 + MODULE_GRSEC
79328
79329 diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
79330 index 502073a..a7de024 100644
79331 --- a/include/linux/vga_switcheroo.h
79332 +++ b/include/linux/vga_switcheroo.h
79333 @@ -63,8 +63,8 @@ int vga_switcheroo_get_client_state(struct pci_dev *dev);
79334
79335 void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic);
79336
79337 -int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain);
79338 -int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain);
79339 +int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain);
79340 +int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain);
79341 #else
79342
79343 static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
79344 @@ -81,8 +81,8 @@ static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return
79345
79346 static inline void vga_switcheroo_set_dynamic_switch(struct pci_dev *pdev, enum vga_switcheroo_state dynamic) {}
79347
79348 -static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
79349 -static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, struct dev_pm_domain *domain) { return -EINVAL; }
79350 +static inline int vga_switcheroo_init_domain_pm_ops(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
79351 +static inline int vga_switcheroo_init_domain_pm_optimus_hdmi_audio(struct device *dev, dev_pm_domain_no_const *domain) { return -EINVAL; }
79352
79353 #endif
79354 #endif /* _LINUX_VGA_SWITCHEROO_H_ */
79355 diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
79356 index 4b8a891..cb8df6e 100644
79357 --- a/include/linux/vmalloc.h
79358 +++ b/include/linux/vmalloc.h
79359 @@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
79360 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
79361 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
79362 #define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
79363 +
79364 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
79365 +#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
79366 +#endif
79367 +
79368 /* bits [20..32] reserved for arch specific ioremap internals */
79369
79370 /*
79371 @@ -142,7 +147,7 @@ extern void free_vm_area(struct vm_struct *area);
79372
79373 /* for /dev/kmem */
79374 extern long vread(char *buf, char *addr, unsigned long count);
79375 -extern long vwrite(char *buf, char *addr, unsigned long count);
79376 +extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
79377
79378 /*
79379 * Internals. Dont't use..
79380 diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
79381 index e4b9480..5a5f65a 100644
79382 --- a/include/linux/vmstat.h
79383 +++ b/include/linux/vmstat.h
79384 @@ -90,18 +90,18 @@ static inline void vm_events_fold_cpu(int cpu)
79385 /*
79386 * Zone based page accounting with per cpu differentials.
79387 */
79388 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79389 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
79390
79391 static inline void zone_page_state_add(long x, struct zone *zone,
79392 enum zone_stat_item item)
79393 {
79394 - atomic_long_add(x, &zone->vm_stat[item]);
79395 - atomic_long_add(x, &vm_stat[item]);
79396 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
79397 + atomic_long_add_unchecked(x, &vm_stat[item]);
79398 }
79399
79400 -static inline unsigned long global_page_state(enum zone_stat_item item)
79401 +static inline unsigned long __intentional_overflow(-1) global_page_state(enum zone_stat_item item)
79402 {
79403 - long x = atomic_long_read(&vm_stat[item]);
79404 + long x = atomic_long_read_unchecked(&vm_stat[item]);
79405 #ifdef CONFIG_SMP
79406 if (x < 0)
79407 x = 0;
79408 @@ -109,10 +109,10 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
79409 return x;
79410 }
79411
79412 -static inline unsigned long zone_page_state(struct zone *zone,
79413 +static inline unsigned long __intentional_overflow(-1) zone_page_state(struct zone *zone,
79414 enum zone_stat_item item)
79415 {
79416 - long x = atomic_long_read(&zone->vm_stat[item]);
79417 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
79418 #ifdef CONFIG_SMP
79419 if (x < 0)
79420 x = 0;
79421 @@ -129,7 +129,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
79422 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
79423 enum zone_stat_item item)
79424 {
79425 - long x = atomic_long_read(&zone->vm_stat[item]);
79426 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
79427
79428 #ifdef CONFIG_SMP
79429 int cpu;
79430 @@ -220,8 +220,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
79431
79432 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
79433 {
79434 - atomic_long_inc(&zone->vm_stat[item]);
79435 - atomic_long_inc(&vm_stat[item]);
79436 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
79437 + atomic_long_inc_unchecked(&vm_stat[item]);
79438 }
79439
79440 static inline void __inc_zone_page_state(struct page *page,
79441 @@ -232,8 +232,8 @@ static inline void __inc_zone_page_state(struct page *page,
79442
79443 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
79444 {
79445 - atomic_long_dec(&zone->vm_stat[item]);
79446 - atomic_long_dec(&vm_stat[item]);
79447 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
79448 + atomic_long_dec_unchecked(&vm_stat[item]);
79449 }
79450
79451 static inline void __dec_zone_page_state(struct page *page,
79452 diff --git a/include/linux/xattr.h b/include/linux/xattr.h
79453 index 91b0a68..0e9adf6 100644
79454 --- a/include/linux/xattr.h
79455 +++ b/include/linux/xattr.h
79456 @@ -28,7 +28,7 @@ struct xattr_handler {
79457 size_t size, int handler_flags);
79458 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
79459 size_t size, int flags, int handler_flags);
79460 -};
79461 +} __do_const;
79462
79463 struct xattr {
79464 const char *name;
79465 @@ -37,6 +37,9 @@ struct xattr {
79466 };
79467
79468 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
79469 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
79470 +ssize_t pax_getxattr(struct dentry *, void *, size_t);
79471 +#endif
79472 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
79473 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
79474 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
79475 diff --git a/include/linux/zlib.h b/include/linux/zlib.h
79476 index 9c5a6b4..09c9438 100644
79477 --- a/include/linux/zlib.h
79478 +++ b/include/linux/zlib.h
79479 @@ -31,6 +31,7 @@
79480 #define _ZLIB_H
79481
79482 #include <linux/zconf.h>
79483 +#include <linux/compiler.h>
79484
79485 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
79486 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
79487 @@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
79488
79489 /* basic functions */
79490
79491 -extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
79492 +extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
79493 /*
79494 Returns the number of bytes that needs to be allocated for a per-
79495 stream workspace with the specified parameters. A pointer to this
79496 diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
79497 index c768c9f..bdcaa5a 100644
79498 --- a/include/media/v4l2-dev.h
79499 +++ b/include/media/v4l2-dev.h
79500 @@ -76,7 +76,7 @@ struct v4l2_file_operations {
79501 int (*mmap) (struct file *, struct vm_area_struct *);
79502 int (*open) (struct file *);
79503 int (*release) (struct file *);
79504 -};
79505 +} __do_const;
79506
79507 /*
79508 * Newer version of video_device, handled by videodev2.c
79509 diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
79510 index c9b1593..a572459 100644
79511 --- a/include/media/v4l2-device.h
79512 +++ b/include/media/v4l2-device.h
79513 @@ -95,7 +95,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
79514 this function returns 0. If the name ends with a digit (e.g. cx18),
79515 then the name will be set to cx18-0 since cx180 looks really odd. */
79516 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
79517 - atomic_t *instance);
79518 + atomic_unchecked_t *instance);
79519
79520 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
79521 Since the parent disappears this ensures that v4l2_dev doesn't have an
79522 diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
79523 index 9a36d92..0aafe2a 100644
79524 --- a/include/net/9p/transport.h
79525 +++ b/include/net/9p/transport.h
79526 @@ -60,7 +60,7 @@ struct p9_trans_module {
79527 int (*cancel) (struct p9_client *, struct p9_req_t *req);
79528 int (*zc_request)(struct p9_client *, struct p9_req_t *,
79529 char *, char *, int , int, int, int);
79530 -};
79531 +} __do_const;
79532
79533 void v9fs_register_trans(struct p9_trans_module *m);
79534 void v9fs_unregister_trans(struct p9_trans_module *m);
79535 diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
79536 index 1a966af..2767cf6 100644
79537 --- a/include/net/bluetooth/l2cap.h
79538 +++ b/include/net/bluetooth/l2cap.h
79539 @@ -551,7 +551,7 @@ struct l2cap_ops {
79540 void (*defer) (struct l2cap_chan *chan);
79541 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
79542 unsigned long len, int nb);
79543 -};
79544 +} __do_const;
79545
79546 struct l2cap_conn {
79547 struct hci_conn *hcon;
79548 diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
79549 index f2ae33d..c457cf0 100644
79550 --- a/include/net/caif/cfctrl.h
79551 +++ b/include/net/caif/cfctrl.h
79552 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
79553 void (*radioset_rsp)(void);
79554 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
79555 struct cflayer *client_layer);
79556 -};
79557 +} __no_const;
79558
79559 /* Link Setup Parameters for CAIF-Links. */
79560 struct cfctrl_link_param {
79561 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
79562 struct cfctrl {
79563 struct cfsrvl serv;
79564 struct cfctrl_rsp res;
79565 - atomic_t req_seq_no;
79566 - atomic_t rsp_seq_no;
79567 + atomic_unchecked_t req_seq_no;
79568 + atomic_unchecked_t rsp_seq_no;
79569 struct list_head list;
79570 /* Protects from simultaneous access to first_req list */
79571 spinlock_t info_list_lock;
79572 diff --git a/include/net/flow.h b/include/net/flow.h
79573 index 628e11b..4c475df 100644
79574 --- a/include/net/flow.h
79575 +++ b/include/net/flow.h
79576 @@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
79577
79578 extern void flow_cache_flush(void);
79579 extern void flow_cache_flush_deferred(void);
79580 -extern atomic_t flow_cache_genid;
79581 +extern atomic_unchecked_t flow_cache_genid;
79582
79583 #endif
79584 diff --git a/include/net/genetlink.h b/include/net/genetlink.h
79585 index 8e0b6c8..73cf605 100644
79586 --- a/include/net/genetlink.h
79587 +++ b/include/net/genetlink.h
79588 @@ -120,7 +120,7 @@ struct genl_ops {
79589 struct netlink_callback *cb);
79590 int (*done)(struct netlink_callback *cb);
79591 struct list_head ops_list;
79592 -};
79593 +} __do_const;
79594
79595 extern int __genl_register_family(struct genl_family *family);
79596
79597 diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
79598 index 734d9b5..48a9a4b 100644
79599 --- a/include/net/gro_cells.h
79600 +++ b/include/net/gro_cells.h
79601 @@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
79602 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
79603
79604 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
79605 - atomic_long_inc(&dev->rx_dropped);
79606 + atomic_long_inc_unchecked(&dev->rx_dropped);
79607 kfree_skb(skb);
79608 return;
79609 }
79610 diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
79611 index de2c785..0588a6b 100644
79612 --- a/include/net/inet_connection_sock.h
79613 +++ b/include/net/inet_connection_sock.h
79614 @@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
79615 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
79616 int (*bind_conflict)(const struct sock *sk,
79617 const struct inet_bind_bucket *tb, bool relax);
79618 -};
79619 +} __do_const;
79620
79621 /** inet_connection_sock - INET connection oriented sock
79622 *
79623 diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
79624 index 53f464d..0bd0b49 100644
79625 --- a/include/net/inetpeer.h
79626 +++ b/include/net/inetpeer.h
79627 @@ -47,8 +47,8 @@ struct inet_peer {
79628 */
79629 union {
79630 struct {
79631 - atomic_t rid; /* Frag reception counter */
79632 - atomic_t ip_id_count; /* IP ID for the next packet */
79633 + atomic_unchecked_t rid; /* Frag reception counter */
79634 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
79635 };
79636 struct rcu_head rcu;
79637 struct inet_peer *gc_next;
79638 @@ -178,16 +178,13 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
79639 /* can be called with or without local BH being disabled */
79640 static inline int inet_getid(struct inet_peer *p, int more)
79641 {
79642 - int old, new;
79643 + int id;
79644 more++;
79645 inet_peer_refcheck(p);
79646 - do {
79647 - old = atomic_read(&p->ip_id_count);
79648 - new = old + more;
79649 - if (!new)
79650 - new = 1;
79651 - } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
79652 - return new;
79653 + id = atomic_add_return_unchecked(more, &p->ip_id_count);
79654 + if (!id)
79655 + id = atomic_inc_return_unchecked(&p->ip_id_count);
79656 + return id;
79657 }
79658
79659 #endif /* _NET_INETPEER_H */
79660 diff --git a/include/net/ip.h b/include/net/ip.h
79661 index 301f10c..b52cdaf 100644
79662 --- a/include/net/ip.h
79663 +++ b/include/net/ip.h
79664 @@ -212,7 +212,7 @@ extern struct local_ports {
79665 } sysctl_local_ports;
79666 extern void inet_get_local_port_range(int *low, int *high);
79667
79668 -extern unsigned long *sysctl_local_reserved_ports;
79669 +extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
79670 static inline int inet_is_reserved_local_port(int port)
79671 {
79672 return test_bit(port, sysctl_local_reserved_ports);
79673 diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
79674 index cbf2be3..3683f6d 100644
79675 --- a/include/net/ip_fib.h
79676 +++ b/include/net/ip_fib.h
79677 @@ -169,7 +169,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
79678
79679 #define FIB_RES_SADDR(net, res) \
79680 ((FIB_RES_NH(res).nh_saddr_genid == \
79681 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
79682 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
79683 FIB_RES_NH(res).nh_saddr : \
79684 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
79685 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
79686 diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
79687 index 772252d..1e69799 100644
79688 --- a/include/net/ip_vs.h
79689 +++ b/include/net/ip_vs.h
79690 @@ -558,7 +558,7 @@ struct ip_vs_conn {
79691 struct ip_vs_conn *control; /* Master control connection */
79692 atomic_t n_control; /* Number of controlled ones */
79693 struct ip_vs_dest *dest; /* real server */
79694 - atomic_t in_pkts; /* incoming packet counter */
79695 + atomic_unchecked_t in_pkts; /* incoming packet counter */
79696
79697 /* packet transmitter for different forwarding methods. If it
79698 mangles the packet, it must return NF_DROP or better NF_STOLEN,
79699 @@ -705,7 +705,7 @@ struct ip_vs_dest {
79700 __be16 port; /* port number of the server */
79701 union nf_inet_addr addr; /* IP address of the server */
79702 volatile unsigned int flags; /* dest status flags */
79703 - atomic_t conn_flags; /* flags to copy to conn */
79704 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
79705 atomic_t weight; /* server weight */
79706
79707 atomic_t refcnt; /* reference counter */
79708 @@ -960,11 +960,11 @@ struct netns_ipvs {
79709 /* ip_vs_lblc */
79710 int sysctl_lblc_expiration;
79711 struct ctl_table_header *lblc_ctl_header;
79712 - struct ctl_table *lblc_ctl_table;
79713 + ctl_table_no_const *lblc_ctl_table;
79714 /* ip_vs_lblcr */
79715 int sysctl_lblcr_expiration;
79716 struct ctl_table_header *lblcr_ctl_header;
79717 - struct ctl_table *lblcr_ctl_table;
79718 + ctl_table_no_const *lblcr_ctl_table;
79719 /* ip_vs_est */
79720 struct list_head est_list; /* estimator list */
79721 spinlock_t est_lock;
79722 diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
79723 index 80ffde3..968b0f4 100644
79724 --- a/include/net/irda/ircomm_tty.h
79725 +++ b/include/net/irda/ircomm_tty.h
79726 @@ -35,6 +35,7 @@
79727 #include <linux/termios.h>
79728 #include <linux/timer.h>
79729 #include <linux/tty.h> /* struct tty_struct */
79730 +#include <asm/local.h>
79731
79732 #include <net/irda/irias_object.h>
79733 #include <net/irda/ircomm_core.h>
79734 diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
79735 index 714cc9a..ea05f3e 100644
79736 --- a/include/net/iucv/af_iucv.h
79737 +++ b/include/net/iucv/af_iucv.h
79738 @@ -149,7 +149,7 @@ struct iucv_skb_cb {
79739 struct iucv_sock_list {
79740 struct hlist_head head;
79741 rwlock_t lock;
79742 - atomic_t autobind_name;
79743 + atomic_unchecked_t autobind_name;
79744 };
79745
79746 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
79747 diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
79748 index df83f69..9b640b8 100644
79749 --- a/include/net/llc_c_ac.h
79750 +++ b/include/net/llc_c_ac.h
79751 @@ -87,7 +87,7 @@
79752 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
79753 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
79754
79755 -typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
79756 +typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
79757
79758 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
79759 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
79760 diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
79761 index 6ca3113..f8026dd 100644
79762 --- a/include/net/llc_c_ev.h
79763 +++ b/include/net/llc_c_ev.h
79764 @@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
79765 return (struct llc_conn_state_ev *)skb->cb;
79766 }
79767
79768 -typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
79769 -typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
79770 +typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
79771 +typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
79772
79773 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
79774 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
79775 diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
79776 index 0e79cfb..f46db31 100644
79777 --- a/include/net/llc_c_st.h
79778 +++ b/include/net/llc_c_st.h
79779 @@ -37,7 +37,7 @@ struct llc_conn_state_trans {
79780 u8 next_state;
79781 llc_conn_ev_qfyr_t *ev_qualifiers;
79782 llc_conn_action_t *ev_actions;
79783 -};
79784 +} __do_const;
79785
79786 struct llc_conn_state {
79787 u8 current_state;
79788 diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
79789 index 37a3bbd..55a4241 100644
79790 --- a/include/net/llc_s_ac.h
79791 +++ b/include/net/llc_s_ac.h
79792 @@ -23,7 +23,7 @@
79793 #define SAP_ACT_TEST_IND 9
79794
79795 /* All action functions must look like this */
79796 -typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
79797 +typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
79798
79799 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
79800 struct sk_buff *skb);
79801 diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
79802 index 567c681..cd73ac0 100644
79803 --- a/include/net/llc_s_st.h
79804 +++ b/include/net/llc_s_st.h
79805 @@ -20,7 +20,7 @@ struct llc_sap_state_trans {
79806 llc_sap_ev_t ev;
79807 u8 next_state;
79808 llc_sap_action_t *ev_actions;
79809 -};
79810 +} __do_const;
79811
79812 struct llc_sap_state {
79813 u8 curr_state;
79814 diff --git a/include/net/mac80211.h b/include/net/mac80211.h
79815 index cc6035f..a8406fc 100644
79816 --- a/include/net/mac80211.h
79817 +++ b/include/net/mac80211.h
79818 @@ -4361,7 +4361,7 @@ struct rate_control_ops {
79819 void (*add_sta_debugfs)(void *priv, void *priv_sta,
79820 struct dentry *dir);
79821 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
79822 -};
79823 +} __do_const;
79824
79825 static inline int rate_supported(struct ieee80211_sta *sta,
79826 enum ieee80211_band band,
79827 diff --git a/include/net/neighbour.h b/include/net/neighbour.h
79828 index 536501a..7c6193c 100644
79829 --- a/include/net/neighbour.h
79830 +++ b/include/net/neighbour.h
79831 @@ -123,7 +123,7 @@ struct neigh_ops {
79832 void (*error_report)(struct neighbour *, struct sk_buff *);
79833 int (*output)(struct neighbour *, struct sk_buff *);
79834 int (*connected_output)(struct neighbour *, struct sk_buff *);
79835 -};
79836 +} __do_const;
79837
79838 struct pneigh_entry {
79839 struct pneigh_entry *next;
79840 diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
79841 index 9d22f08..980fbf8 100644
79842 --- a/include/net/net_namespace.h
79843 +++ b/include/net/net_namespace.h
79844 @@ -120,7 +120,7 @@ struct net {
79845 struct netns_ipvs *ipvs;
79846 #endif
79847 struct sock *diag_nlsk;
79848 - atomic_t fnhe_genid;
79849 + atomic_unchecked_t fnhe_genid;
79850 };
79851
79852 /*
79853 @@ -277,7 +277,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
79854 #define __net_init __init
79855 #define __net_exit __exit_refok
79856 #define __net_initdata __initdata
79857 +#ifdef CONSTIFY_PLUGIN
79858 #define __net_initconst __initconst
79859 +#else
79860 +#define __net_initconst __initdata
79861 +#endif
79862 #endif
79863
79864 struct pernet_operations {
79865 @@ -287,7 +291,7 @@ struct pernet_operations {
79866 void (*exit_batch)(struct list_head *net_exit_list);
79867 int *id;
79868 size_t size;
79869 -};
79870 +} __do_const;
79871
79872 /*
79873 * Use these carefully. If you implement a network device and it
79874 @@ -335,23 +339,23 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
79875
79876 static inline int rt_genid_ipv4(struct net *net)
79877 {
79878 - return atomic_read(&net->ipv4.rt_genid);
79879 + return atomic_read_unchecked(&net->ipv4.rt_genid);
79880 }
79881
79882 static inline void rt_genid_bump_ipv4(struct net *net)
79883 {
79884 - atomic_inc(&net->ipv4.rt_genid);
79885 + atomic_inc_unchecked(&net->ipv4.rt_genid);
79886 }
79887
79888 #if IS_ENABLED(CONFIG_IPV6)
79889 static inline int rt_genid_ipv6(struct net *net)
79890 {
79891 - return atomic_read(&net->ipv6.rt_genid);
79892 + return atomic_read_unchecked(&net->ipv6.rt_genid);
79893 }
79894
79895 static inline void rt_genid_bump_ipv6(struct net *net)
79896 {
79897 - atomic_inc(&net->ipv6.rt_genid);
79898 + atomic_inc_unchecked(&net->ipv6.rt_genid);
79899 }
79900 #else
79901 static inline int rt_genid_ipv6(struct net *net)
79902 @@ -373,12 +377,12 @@ static inline void rt_genid_bump_all(struct net *net)
79903
79904 static inline int fnhe_genid(struct net *net)
79905 {
79906 - return atomic_read(&net->fnhe_genid);
79907 + return atomic_read_unchecked(&net->fnhe_genid);
79908 }
79909
79910 static inline void fnhe_genid_bump(struct net *net)
79911 {
79912 - atomic_inc(&net->fnhe_genid);
79913 + atomic_inc_unchecked(&net->fnhe_genid);
79914 }
79915
79916 #endif /* __NET_NET_NAMESPACE_H */
79917 diff --git a/include/net/netdma.h b/include/net/netdma.h
79918 index 8ba8ce2..99b7fff 100644
79919 --- a/include/net/netdma.h
79920 +++ b/include/net/netdma.h
79921 @@ -24,7 +24,7 @@
79922 #include <linux/dmaengine.h>
79923 #include <linux/skbuff.h>
79924
79925 -int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
79926 +int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
79927 struct sk_buff *skb, int offset, struct iovec *to,
79928 size_t len, struct dma_pinned_list *pinned_list);
79929
79930 diff --git a/include/net/netlink.h b/include/net/netlink.h
79931 index 9690b0f..87aded7 100644
79932 --- a/include/net/netlink.h
79933 +++ b/include/net/netlink.h
79934 @@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
79935 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
79936 {
79937 if (mark)
79938 - skb_trim(skb, (unsigned char *) mark - skb->data);
79939 + skb_trim(skb, (const unsigned char *) mark - skb->data);
79940 }
79941
79942 /**
79943 diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
79944 index c9c0c53..53f24c3 100644
79945 --- a/include/net/netns/conntrack.h
79946 +++ b/include/net/netns/conntrack.h
79947 @@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
79948 struct nf_proto_net {
79949 #ifdef CONFIG_SYSCTL
79950 struct ctl_table_header *ctl_table_header;
79951 - struct ctl_table *ctl_table;
79952 + ctl_table_no_const *ctl_table;
79953 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
79954 struct ctl_table_header *ctl_compat_header;
79955 - struct ctl_table *ctl_compat_table;
79956 + ctl_table_no_const *ctl_compat_table;
79957 #endif
79958 #endif
79959 unsigned int users;
79960 @@ -58,7 +58,7 @@ struct nf_ip_net {
79961 struct nf_icmp_net icmpv6;
79962 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
79963 struct ctl_table_header *ctl_table_header;
79964 - struct ctl_table *ctl_table;
79965 + ctl_table_no_const *ctl_table;
79966 #endif
79967 };
79968
79969 diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
79970 index bf2ec22..5e7f9d9 100644
79971 --- a/include/net/netns/ipv4.h
79972 +++ b/include/net/netns/ipv4.h
79973 @@ -67,7 +67,7 @@ struct netns_ipv4 {
79974 kgid_t sysctl_ping_group_range[2];
79975 long sysctl_tcp_mem[3];
79976
79977 - atomic_t dev_addr_genid;
79978 + atomic_unchecked_t dev_addr_genid;
79979
79980 #ifdef CONFIG_IP_MROUTE
79981 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
79982 @@ -77,6 +77,6 @@ struct netns_ipv4 {
79983 struct fib_rules_ops *mr_rules_ops;
79984 #endif
79985 #endif
79986 - atomic_t rt_genid;
79987 + atomic_unchecked_t rt_genid;
79988 };
79989 #endif
79990 diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
79991 index 0fb2401..477d81c 100644
79992 --- a/include/net/netns/ipv6.h
79993 +++ b/include/net/netns/ipv6.h
79994 @@ -71,8 +71,8 @@ struct netns_ipv6 {
79995 struct fib_rules_ops *mr6_rules_ops;
79996 #endif
79997 #endif
79998 - atomic_t dev_addr_genid;
79999 - atomic_t rt_genid;
80000 + atomic_unchecked_t dev_addr_genid;
80001 + atomic_unchecked_t rt_genid;
80002 };
80003
80004 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
80005 diff --git a/include/net/ping.h b/include/net/ping.h
80006 index 2b496e9..935fd8d 100644
80007 --- a/include/net/ping.h
80008 +++ b/include/net/ping.h
80009 @@ -56,7 +56,7 @@ struct ping_iter_state {
80010 extern struct proto ping_prot;
80011 extern struct ping_table ping_table;
80012 #if IS_ENABLED(CONFIG_IPV6)
80013 -extern struct pingv6_ops pingv6_ops;
80014 +extern struct pingv6_ops *pingv6_ops;
80015 #endif
80016
80017 struct pingfakehdr {
80018 diff --git a/include/net/protocol.h b/include/net/protocol.h
80019 index 047c047..b9dad15 100644
80020 --- a/include/net/protocol.h
80021 +++ b/include/net/protocol.h
80022 @@ -44,7 +44,7 @@ struct net_protocol {
80023 void (*err_handler)(struct sk_buff *skb, u32 info);
80024 unsigned int no_policy:1,
80025 netns_ok:1;
80026 -};
80027 +} __do_const;
80028
80029 #if IS_ENABLED(CONFIG_IPV6)
80030 struct inet6_protocol {
80031 @@ -57,7 +57,7 @@ struct inet6_protocol {
80032 u8 type, u8 code, int offset,
80033 __be32 info);
80034 unsigned int flags; /* INET6_PROTO_xxx */
80035 -};
80036 +} __do_const;
80037
80038 #define INET6_PROTO_NOPOLICY 0x1
80039 #define INET6_PROTO_FINAL 0x2
80040 diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
80041 index 7026648..584cc8c 100644
80042 --- a/include/net/rtnetlink.h
80043 +++ b/include/net/rtnetlink.h
80044 @@ -81,7 +81,7 @@ struct rtnl_link_ops {
80045 const struct net_device *dev);
80046 unsigned int (*get_num_tx_queues)(void);
80047 unsigned int (*get_num_rx_queues)(void);
80048 -};
80049 +} __do_const;
80050
80051 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
80052 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
80053 diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
80054 index 4ef75af..5aa073a 100644
80055 --- a/include/net/sctp/sm.h
80056 +++ b/include/net/sctp/sm.h
80057 @@ -81,7 +81,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
80058 typedef struct {
80059 sctp_state_fn_t *fn;
80060 const char *name;
80061 -} sctp_sm_table_entry_t;
80062 +} __do_const sctp_sm_table_entry_t;
80063
80064 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
80065 * currently in use.
80066 @@ -293,7 +293,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
80067 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
80068
80069 /* Extern declarations for major data structures. */
80070 -extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
80071 +extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
80072
80073
80074 /* Get the size of a DATA chunk payload. */
80075 diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
80076 index 2174d8d..71d5257 100644
80077 --- a/include/net/sctp/structs.h
80078 +++ b/include/net/sctp/structs.h
80079 @@ -508,7 +508,7 @@ struct sctp_pf {
80080 struct sctp_association *asoc);
80081 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
80082 struct sctp_af *af;
80083 -};
80084 +} __do_const;
80085
80086
80087 /* Structure to track chunk fragments that have been acked, but peer
80088 diff --git a/include/net/sock.h b/include/net/sock.h
80089 index 808cbc2..8617e9c 100644
80090 --- a/include/net/sock.h
80091 +++ b/include/net/sock.h
80092 @@ -332,7 +332,7 @@ struct sock {
80093 unsigned int sk_napi_id;
80094 unsigned int sk_ll_usec;
80095 #endif
80096 - atomic_t sk_drops;
80097 + atomic_unchecked_t sk_drops;
80098 int sk_rcvbuf;
80099
80100 struct sk_filter __rcu *sk_filter;
80101 @@ -1194,7 +1194,7 @@ static inline u64 memcg_memory_allocated_read(struct cg_proto *prot)
80102 return ret >> PAGE_SHIFT;
80103 }
80104
80105 -static inline long
80106 +static inline long __intentional_overflow(-1)
80107 sk_memory_allocated(const struct sock *sk)
80108 {
80109 struct proto *prot = sk->sk_prot;
80110 @@ -1821,7 +1821,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
80111 }
80112
80113 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
80114 - char __user *from, char *to,
80115 + char __user *from, unsigned char *to,
80116 int copy, int offset)
80117 {
80118 if (skb->ip_summed == CHECKSUM_NONE) {
80119 @@ -2083,7 +2083,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
80120 }
80121 }
80122
80123 -struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
80124 +struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
80125
80126 /**
80127 * sk_page_frag - return an appropriate page_frag
80128 diff --git a/include/net/tcp.h b/include/net/tcp.h
80129 index b1aa324..b8530ea 100644
80130 --- a/include/net/tcp.h
80131 +++ b/include/net/tcp.h
80132 @@ -527,7 +527,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
80133 extern void tcp_xmit_retransmit_queue(struct sock *);
80134 extern void tcp_simple_retransmit(struct sock *);
80135 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
80136 -extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
80137 +extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
80138
80139 extern void tcp_send_probe0(struct sock *);
80140 extern void tcp_send_partial(struct sock *);
80141 @@ -699,8 +699,8 @@ struct tcp_skb_cb {
80142 struct inet6_skb_parm h6;
80143 #endif
80144 } header; /* For incoming frames */
80145 - __u32 seq; /* Starting sequence number */
80146 - __u32 end_seq; /* SEQ + FIN + SYN + datalen */
80147 + __u32 seq __intentional_overflow(0); /* Starting sequence number */
80148 + __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
80149 __u32 when; /* used to compute rtt's */
80150 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
80151
80152 @@ -714,7 +714,7 @@ struct tcp_skb_cb {
80153
80154 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
80155 /* 1 byte hole */
80156 - __u32 ack_seq; /* Sequence number ACK'd */
80157 + __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
80158 };
80159
80160 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
80161 diff --git a/include/net/xfrm.h b/include/net/xfrm.h
80162 index e253bf0..2278b4b 100644
80163 --- a/include/net/xfrm.h
80164 +++ b/include/net/xfrm.h
80165 @@ -287,7 +287,6 @@ struct xfrm_dst;
80166 struct xfrm_policy_afinfo {
80167 unsigned short family;
80168 struct dst_ops *dst_ops;
80169 - void (*garbage_collect)(struct net *net);
80170 struct dst_entry *(*dst_lookup)(struct net *net, int tos,
80171 const xfrm_address_t *saddr,
80172 const xfrm_address_t *daddr);
80173 @@ -305,7 +304,7 @@ struct xfrm_policy_afinfo {
80174 struct net_device *dev,
80175 const struct flowi *fl);
80176 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
80177 -};
80178 +} __do_const;
80179
80180 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
80181 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
80182 @@ -342,7 +341,7 @@ struct xfrm_state_afinfo {
80183 int (*transport_finish)(struct sk_buff *skb,
80184 int async);
80185 void (*local_error)(struct sk_buff *skb, u32 mtu);
80186 -};
80187 +} __do_const;
80188
80189 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
80190 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
80191 @@ -427,7 +426,7 @@ struct xfrm_mode {
80192 struct module *owner;
80193 unsigned int encap;
80194 int flags;
80195 -};
80196 +} __do_const;
80197
80198 /* Flags for xfrm_mode. */
80199 enum {
80200 @@ -524,7 +523,7 @@ struct xfrm_policy {
80201 struct timer_list timer;
80202
80203 struct flow_cache_object flo;
80204 - atomic_t genid;
80205 + atomic_unchecked_t genid;
80206 u32 priority;
80207 u32 index;
80208 struct xfrm_mark mark;
80209 @@ -1164,6 +1163,7 @@ static inline void xfrm_sk_free_policy(struct sock *sk)
80210 }
80211
80212 extern void xfrm_garbage_collect(struct net *net);
80213 +extern void xfrm_garbage_collect_deferred(struct net *net);
80214
80215 #else
80216
80217 @@ -1202,6 +1202,9 @@ static inline int xfrm6_policy_check_reverse(struct sock *sk, int dir,
80218 static inline void xfrm_garbage_collect(struct net *net)
80219 {
80220 }
80221 +static inline void xfrm_garbage_collect_deferred(struct net *net)
80222 +{
80223 +}
80224 #endif
80225
80226 static __inline__
80227 diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
80228 index 1017e0b..227aa4d 100644
80229 --- a/include/rdma/iw_cm.h
80230 +++ b/include/rdma/iw_cm.h
80231 @@ -122,7 +122,7 @@ struct iw_cm_verbs {
80232 int backlog);
80233
80234 int (*destroy_listen)(struct iw_cm_id *cm_id);
80235 -};
80236 +} __no_const;
80237
80238 /**
80239 * iw_create_cm_id - Create an IW CM identifier.
80240 diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
80241 index e1379b4..67eafbe 100644
80242 --- a/include/scsi/libfc.h
80243 +++ b/include/scsi/libfc.h
80244 @@ -762,6 +762,7 @@ struct libfc_function_template {
80245 */
80246 void (*disc_stop_final) (struct fc_lport *);
80247 };
80248 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
80249
80250 /**
80251 * struct fc_disc - Discovery context
80252 @@ -866,7 +867,7 @@ struct fc_lport {
80253 struct fc_vport *vport;
80254
80255 /* Operational Information */
80256 - struct libfc_function_template tt;
80257 + libfc_function_template_no_const tt;
80258 u8 link_up;
80259 u8 qfull;
80260 enum fc_lport_state state;
80261 diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
80262 index d65fbec..f80fef2 100644
80263 --- a/include/scsi/scsi_device.h
80264 +++ b/include/scsi/scsi_device.h
80265 @@ -180,9 +180,9 @@ struct scsi_device {
80266 unsigned int max_device_blocked; /* what device_blocked counts down from */
80267 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
80268
80269 - atomic_t iorequest_cnt;
80270 - atomic_t iodone_cnt;
80271 - atomic_t ioerr_cnt;
80272 + atomic_unchecked_t iorequest_cnt;
80273 + atomic_unchecked_t iodone_cnt;
80274 + atomic_unchecked_t ioerr_cnt;
80275
80276 struct device sdev_gendev,
80277 sdev_dev;
80278 diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
80279 index b797e8f..8e2c3aa 100644
80280 --- a/include/scsi/scsi_transport_fc.h
80281 +++ b/include/scsi/scsi_transport_fc.h
80282 @@ -751,7 +751,8 @@ struct fc_function_template {
80283 unsigned long show_host_system_hostname:1;
80284
80285 unsigned long disable_target_scan:1;
80286 -};
80287 +} __do_const;
80288 +typedef struct fc_function_template __no_const fc_function_template_no_const;
80289
80290
80291 /**
80292 diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
80293 index ae6c3b8..fd748ac 100644
80294 --- a/include/sound/compress_driver.h
80295 +++ b/include/sound/compress_driver.h
80296 @@ -128,7 +128,7 @@ struct snd_compr_ops {
80297 struct snd_compr_caps *caps);
80298 int (*get_codec_caps) (struct snd_compr_stream *stream,
80299 struct snd_compr_codec_caps *codec);
80300 -};
80301 +} __no_const;
80302
80303 /**
80304 * struct snd_compr: Compressed device
80305 diff --git a/include/sound/soc.h b/include/sound/soc.h
80306 index d22cb0a..c6ba150 100644
80307 --- a/include/sound/soc.h
80308 +++ b/include/sound/soc.h
80309 @@ -780,7 +780,7 @@ struct snd_soc_codec_driver {
80310 /* probe ordering - for components with runtime dependencies */
80311 int probe_order;
80312 int remove_order;
80313 -};
80314 +} __do_const;
80315
80316 /* SoC platform interface */
80317 struct snd_soc_platform_driver {
80318 @@ -826,7 +826,7 @@ struct snd_soc_platform_driver {
80319 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
80320 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
80321 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
80322 -};
80323 +} __do_const;
80324
80325 struct snd_soc_platform {
80326 const char *name;
80327 diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
80328 index 5bdb8b7..bb1096c 100644
80329 --- a/include/target/target_core_base.h
80330 +++ b/include/target/target_core_base.h
80331 @@ -663,7 +663,7 @@ struct se_device {
80332 spinlock_t stats_lock;
80333 /* Active commands on this virtual SE device */
80334 atomic_t simple_cmds;
80335 - atomic_t dev_ordered_id;
80336 + atomic_unchecked_t dev_ordered_id;
80337 atomic_t dev_ordered_sync;
80338 atomic_t dev_qf_count;
80339 int export_count;
80340 diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
80341 new file mode 100644
80342 index 0000000..fb634b7
80343 --- /dev/null
80344 +++ b/include/trace/events/fs.h
80345 @@ -0,0 +1,53 @@
80346 +#undef TRACE_SYSTEM
80347 +#define TRACE_SYSTEM fs
80348 +
80349 +#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
80350 +#define _TRACE_FS_H
80351 +
80352 +#include <linux/fs.h>
80353 +#include <linux/tracepoint.h>
80354 +
80355 +TRACE_EVENT(do_sys_open,
80356 +
80357 + TP_PROTO(const char *filename, int flags, int mode),
80358 +
80359 + TP_ARGS(filename, flags, mode),
80360 +
80361 + TP_STRUCT__entry(
80362 + __string( filename, filename )
80363 + __field( int, flags )
80364 + __field( int, mode )
80365 + ),
80366 +
80367 + TP_fast_assign(
80368 + __assign_str(filename, filename);
80369 + __entry->flags = flags;
80370 + __entry->mode = mode;
80371 + ),
80372 +
80373 + TP_printk("\"%s\" %x %o",
80374 + __get_str(filename), __entry->flags, __entry->mode)
80375 +);
80376 +
80377 +TRACE_EVENT(open_exec,
80378 +
80379 + TP_PROTO(const char *filename),
80380 +
80381 + TP_ARGS(filename),
80382 +
80383 + TP_STRUCT__entry(
80384 + __string( filename, filename )
80385 + ),
80386 +
80387 + TP_fast_assign(
80388 + __assign_str(filename, filename);
80389 + ),
80390 +
80391 + TP_printk("\"%s\"",
80392 + __get_str(filename))
80393 +);
80394 +
80395 +#endif /* _TRACE_FS_H */
80396 +
80397 +/* This part must be outside protection */
80398 +#include <trace/define_trace.h>
80399 diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
80400 index 1c09820..7f5ec79 100644
80401 --- a/include/trace/events/irq.h
80402 +++ b/include/trace/events/irq.h
80403 @@ -36,7 +36,7 @@ struct softirq_action;
80404 */
80405 TRACE_EVENT(irq_handler_entry,
80406
80407 - TP_PROTO(int irq, struct irqaction *action),
80408 + TP_PROTO(int irq, const struct irqaction *action),
80409
80410 TP_ARGS(irq, action),
80411
80412 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
80413 */
80414 TRACE_EVENT(irq_handler_exit,
80415
80416 - TP_PROTO(int irq, struct irqaction *action, int ret),
80417 + TP_PROTO(int irq, const struct irqaction *action, int ret),
80418
80419 TP_ARGS(irq, action, ret),
80420
80421 diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
80422 index 7caf44c..23c6f27 100644
80423 --- a/include/uapi/linux/a.out.h
80424 +++ b/include/uapi/linux/a.out.h
80425 @@ -39,6 +39,14 @@ enum machine_type {
80426 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
80427 };
80428
80429 +/* Constants for the N_FLAGS field */
80430 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
80431 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
80432 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
80433 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
80434 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
80435 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
80436 +
80437 #if !defined (N_MAGIC)
80438 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
80439 #endif
80440 diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
80441 index d876736..ccce5c0 100644
80442 --- a/include/uapi/linux/byteorder/little_endian.h
80443 +++ b/include/uapi/linux/byteorder/little_endian.h
80444 @@ -42,51 +42,51 @@
80445
80446 static inline __le64 __cpu_to_le64p(const __u64 *p)
80447 {
80448 - return (__force __le64)*p;
80449 + return (__force const __le64)*p;
80450 }
80451 -static inline __u64 __le64_to_cpup(const __le64 *p)
80452 +static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
80453 {
80454 - return (__force __u64)*p;
80455 + return (__force const __u64)*p;
80456 }
80457 static inline __le32 __cpu_to_le32p(const __u32 *p)
80458 {
80459 - return (__force __le32)*p;
80460 + return (__force const __le32)*p;
80461 }
80462 static inline __u32 __le32_to_cpup(const __le32 *p)
80463 {
80464 - return (__force __u32)*p;
80465 + return (__force const __u32)*p;
80466 }
80467 static inline __le16 __cpu_to_le16p(const __u16 *p)
80468 {
80469 - return (__force __le16)*p;
80470 + return (__force const __le16)*p;
80471 }
80472 static inline __u16 __le16_to_cpup(const __le16 *p)
80473 {
80474 - return (__force __u16)*p;
80475 + return (__force const __u16)*p;
80476 }
80477 static inline __be64 __cpu_to_be64p(const __u64 *p)
80478 {
80479 - return (__force __be64)__swab64p(p);
80480 + return (__force const __be64)__swab64p(p);
80481 }
80482 static inline __u64 __be64_to_cpup(const __be64 *p)
80483 {
80484 - return __swab64p((__u64 *)p);
80485 + return __swab64p((const __u64 *)p);
80486 }
80487 static inline __be32 __cpu_to_be32p(const __u32 *p)
80488 {
80489 - return (__force __be32)__swab32p(p);
80490 + return (__force const __be32)__swab32p(p);
80491 }
80492 -static inline __u32 __be32_to_cpup(const __be32 *p)
80493 +static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
80494 {
80495 - return __swab32p((__u32 *)p);
80496 + return __swab32p((const __u32 *)p);
80497 }
80498 static inline __be16 __cpu_to_be16p(const __u16 *p)
80499 {
80500 - return (__force __be16)__swab16p(p);
80501 + return (__force const __be16)__swab16p(p);
80502 }
80503 static inline __u16 __be16_to_cpup(const __be16 *p)
80504 {
80505 - return __swab16p((__u16 *)p);
80506 + return __swab16p((const __u16 *)p);
80507 }
80508 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
80509 #define __le64_to_cpus(x) do { (void)(x); } while (0)
80510 diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
80511 index ef6103b..d4e65dd 100644
80512 --- a/include/uapi/linux/elf.h
80513 +++ b/include/uapi/linux/elf.h
80514 @@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
80515 #define PT_GNU_EH_FRAME 0x6474e550
80516
80517 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
80518 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
80519 +
80520 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
80521 +
80522 +/* Constants for the e_flags field */
80523 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
80524 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
80525 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
80526 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
80527 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
80528 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
80529
80530 /*
80531 * Extended Numbering
80532 @@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
80533 #define DT_DEBUG 21
80534 #define DT_TEXTREL 22
80535 #define DT_JMPREL 23
80536 +#define DT_FLAGS 30
80537 + #define DF_TEXTREL 0x00000004
80538 #define DT_ENCODING 32
80539 #define OLD_DT_LOOS 0x60000000
80540 #define DT_LOOS 0x6000000d
80541 @@ -240,6 +253,19 @@ typedef struct elf64_hdr {
80542 #define PF_W 0x2
80543 #define PF_X 0x1
80544
80545 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
80546 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
80547 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
80548 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
80549 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
80550 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
80551 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
80552 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
80553 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
80554 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
80555 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
80556 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
80557 +
80558 typedef struct elf32_phdr{
80559 Elf32_Word p_type;
80560 Elf32_Off p_offset;
80561 @@ -332,6 +358,8 @@ typedef struct elf64_shdr {
80562 #define EI_OSABI 7
80563 #define EI_PAD 8
80564
80565 +#define EI_PAX 14
80566 +
80567 #define ELFMAG0 0x7f /* EI_MAG */
80568 #define ELFMAG1 'E'
80569 #define ELFMAG2 'L'
80570 diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
80571 index aa169c4..6a2771d 100644
80572 --- a/include/uapi/linux/personality.h
80573 +++ b/include/uapi/linux/personality.h
80574 @@ -30,6 +30,7 @@ enum {
80575 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
80576 ADDR_NO_RANDOMIZE | \
80577 ADDR_COMPAT_LAYOUT | \
80578 + ADDR_LIMIT_3GB | \
80579 MMAP_PAGE_ZERO)
80580
80581 /*
80582 diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
80583 index 7530e74..e714828 100644
80584 --- a/include/uapi/linux/screen_info.h
80585 +++ b/include/uapi/linux/screen_info.h
80586 @@ -43,7 +43,8 @@ struct screen_info {
80587 __u16 pages; /* 0x32 */
80588 __u16 vesa_attributes; /* 0x34 */
80589 __u32 capabilities; /* 0x36 */
80590 - __u8 _reserved[6]; /* 0x3a */
80591 + __u16 vesapm_size; /* 0x3a */
80592 + __u8 _reserved[4]; /* 0x3c */
80593 } __attribute__((packed));
80594
80595 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
80596 diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
80597 index 0e011eb..82681b1 100644
80598 --- a/include/uapi/linux/swab.h
80599 +++ b/include/uapi/linux/swab.h
80600 @@ -43,7 +43,7 @@
80601 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
80602 */
80603
80604 -static inline __attribute_const__ __u16 __fswab16(__u16 val)
80605 +static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
80606 {
80607 #ifdef __HAVE_BUILTIN_BSWAP16__
80608 return __builtin_bswap16(val);
80609 @@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
80610 #endif
80611 }
80612
80613 -static inline __attribute_const__ __u32 __fswab32(__u32 val)
80614 +static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
80615 {
80616 #ifdef __HAVE_BUILTIN_BSWAP32__
80617 return __builtin_bswap32(val);
80618 @@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
80619 #endif
80620 }
80621
80622 -static inline __attribute_const__ __u64 __fswab64(__u64 val)
80623 +static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
80624 {
80625 #ifdef __HAVE_BUILTIN_BSWAP64__
80626 return __builtin_bswap64(val);
80627 diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
80628 index 6d67213..8dab561 100644
80629 --- a/include/uapi/linux/sysctl.h
80630 +++ b/include/uapi/linux/sysctl.h
80631 @@ -155,7 +155,11 @@ enum
80632 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
80633 };
80634
80635 -
80636 +#ifdef CONFIG_PAX_SOFTMODE
80637 +enum {
80638 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
80639 +};
80640 +#endif
80641
80642 /* CTL_VM names: */
80643 enum
80644 diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
80645 index e4629b9..6958086 100644
80646 --- a/include/uapi/linux/xattr.h
80647 +++ b/include/uapi/linux/xattr.h
80648 @@ -63,5 +63,9 @@
80649 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
80650 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
80651
80652 +/* User namespace */
80653 +#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
80654 +#define XATTR_PAX_FLAGS_SUFFIX "flags"
80655 +#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
80656
80657 #endif /* _UAPI_LINUX_XATTR_H */
80658 diff --git a/include/video/udlfb.h b/include/video/udlfb.h
80659 index f9466fa..f4e2b81 100644
80660 --- a/include/video/udlfb.h
80661 +++ b/include/video/udlfb.h
80662 @@ -53,10 +53,10 @@ struct dlfb_data {
80663 u32 pseudo_palette[256];
80664 int blank_mode; /*one of FB_BLANK_ */
80665 /* blit-only rendering path metrics, exposed through sysfs */
80666 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
80667 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
80668 - atomic_t bytes_sent; /* to usb, after compression including overhead */
80669 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
80670 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
80671 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
80672 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
80673 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
80674 };
80675
80676 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
80677 diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
80678 index 30f5362..8ed8ac9 100644
80679 --- a/include/video/uvesafb.h
80680 +++ b/include/video/uvesafb.h
80681 @@ -122,6 +122,7 @@ struct uvesafb_par {
80682 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
80683 u8 pmi_setpal; /* PMI for palette changes */
80684 u16 *pmi_base; /* protected mode interface location */
80685 + u8 *pmi_code; /* protected mode code location */
80686 void *pmi_start;
80687 void *pmi_pal;
80688 u8 *vbe_state_orig; /*
80689 diff --git a/init/Kconfig b/init/Kconfig
80690 index 3ecd8a1..627843f 100644
80691 --- a/init/Kconfig
80692 +++ b/init/Kconfig
80693 @@ -1086,6 +1086,7 @@ endif # CGROUPS
80694
80695 config CHECKPOINT_RESTORE
80696 bool "Checkpoint/restore support" if EXPERT
80697 + depends on !GRKERNSEC
80698 default n
80699 help
80700 Enables additional kernel features in a sake of checkpoint/restore.
80701 @@ -1557,7 +1558,7 @@ config SLUB_DEBUG
80702
80703 config COMPAT_BRK
80704 bool "Disable heap randomization"
80705 - default y
80706 + default n
80707 help
80708 Randomizing heap placement makes heap exploits harder, but it
80709 also breaks ancient binaries (including anything libc5 based).
80710 @@ -1832,7 +1833,7 @@ config INIT_ALL_POSSIBLE
80711 config STOP_MACHINE
80712 bool
80713 default y
80714 - depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
80715 + depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
80716 help
80717 Need stop_machine() primitive.
80718
80719 diff --git a/init/Makefile b/init/Makefile
80720 index 7bc47ee..6da2dc7 100644
80721 --- a/init/Makefile
80722 +++ b/init/Makefile
80723 @@ -2,6 +2,9 @@
80724 # Makefile for the linux kernel.
80725 #
80726
80727 +ccflags-y := $(GCC_PLUGINS_CFLAGS)
80728 +asflags-y := $(GCC_PLUGINS_AFLAGS)
80729 +
80730 obj-y := main.o version.o mounts.o
80731 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
80732 obj-y += noinitramfs.o
80733 diff --git a/init/do_mounts.c b/init/do_mounts.c
80734 index a51cddc..25c2768 100644
80735 --- a/init/do_mounts.c
80736 +++ b/init/do_mounts.c
80737 @@ -357,11 +357,11 @@ static void __init get_fs_names(char *page)
80738 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
80739 {
80740 struct super_block *s;
80741 - int err = sys_mount(name, "/root", fs, flags, data);
80742 + int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
80743 if (err)
80744 return err;
80745
80746 - sys_chdir("/root");
80747 + sys_chdir((const char __force_user *)"/root");
80748 s = current->fs->pwd.dentry->d_sb;
80749 ROOT_DEV = s->s_dev;
80750 printk(KERN_INFO
80751 @@ -482,18 +482,18 @@ void __init change_floppy(char *fmt, ...)
80752 va_start(args, fmt);
80753 vsprintf(buf, fmt, args);
80754 va_end(args);
80755 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
80756 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
80757 if (fd >= 0) {
80758 sys_ioctl(fd, FDEJECT, 0);
80759 sys_close(fd);
80760 }
80761 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
80762 - fd = sys_open("/dev/console", O_RDWR, 0);
80763 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
80764 if (fd >= 0) {
80765 sys_ioctl(fd, TCGETS, (long)&termios);
80766 termios.c_lflag &= ~ICANON;
80767 sys_ioctl(fd, TCSETSF, (long)&termios);
80768 - sys_read(fd, &c, 1);
80769 + sys_read(fd, (char __user *)&c, 1);
80770 termios.c_lflag |= ICANON;
80771 sys_ioctl(fd, TCSETSF, (long)&termios);
80772 sys_close(fd);
80773 @@ -587,8 +587,8 @@ void __init prepare_namespace(void)
80774 mount_root();
80775 out:
80776 devtmpfs_mount("dev");
80777 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
80778 - sys_chroot(".");
80779 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
80780 + sys_chroot((const char __force_user *)".");
80781 }
80782
80783 static bool is_tmpfs;
80784 diff --git a/init/do_mounts.h b/init/do_mounts.h
80785 index f5b978a..69dbfe8 100644
80786 --- a/init/do_mounts.h
80787 +++ b/init/do_mounts.h
80788 @@ -15,15 +15,15 @@ extern int root_mountflags;
80789
80790 static inline int create_dev(char *name, dev_t dev)
80791 {
80792 - sys_unlink(name);
80793 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
80794 + sys_unlink((char __force_user *)name);
80795 + return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
80796 }
80797
80798 #if BITS_PER_LONG == 32
80799 static inline u32 bstat(char *name)
80800 {
80801 struct stat64 stat;
80802 - if (sys_stat64(name, &stat) != 0)
80803 + if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
80804 return 0;
80805 if (!S_ISBLK(stat.st_mode))
80806 return 0;
80807 @@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
80808 static inline u32 bstat(char *name)
80809 {
80810 struct stat stat;
80811 - if (sys_newstat(name, &stat) != 0)
80812 + if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
80813 return 0;
80814 if (!S_ISBLK(stat.st_mode))
80815 return 0;
80816 diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
80817 index 3e0878e..8a9d7a0 100644
80818 --- a/init/do_mounts_initrd.c
80819 +++ b/init/do_mounts_initrd.c
80820 @@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
80821 {
80822 sys_unshare(CLONE_FS | CLONE_FILES);
80823 /* stdin/stdout/stderr for /linuxrc */
80824 - sys_open("/dev/console", O_RDWR, 0);
80825 + sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
80826 sys_dup(0);
80827 sys_dup(0);
80828 /* move initrd over / and chdir/chroot in initrd root */
80829 - sys_chdir("/root");
80830 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
80831 - sys_chroot(".");
80832 + sys_chdir((const char __force_user *)"/root");
80833 + sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
80834 + sys_chroot((const char __force_user *)".");
80835 sys_setsid();
80836 return 0;
80837 }
80838 @@ -59,8 +59,8 @@ static void __init handle_initrd(void)
80839 create_dev("/dev/root.old", Root_RAM0);
80840 /* mount initrd on rootfs' /root */
80841 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
80842 - sys_mkdir("/old", 0700);
80843 - sys_chdir("/old");
80844 + sys_mkdir((const char __force_user *)"/old", 0700);
80845 + sys_chdir((const char __force_user *)"/old");
80846
80847 /* try loading default modules from initrd */
80848 load_default_modules();
80849 @@ -80,31 +80,31 @@ static void __init handle_initrd(void)
80850 current->flags &= ~PF_FREEZER_SKIP;
80851
80852 /* move initrd to rootfs' /old */
80853 - sys_mount("..", ".", NULL, MS_MOVE, NULL);
80854 + sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
80855 /* switch root and cwd back to / of rootfs */
80856 - sys_chroot("..");
80857 + sys_chroot((const char __force_user *)"..");
80858
80859 if (new_decode_dev(real_root_dev) == Root_RAM0) {
80860 - sys_chdir("/old");
80861 + sys_chdir((const char __force_user *)"/old");
80862 return;
80863 }
80864
80865 - sys_chdir("/");
80866 + sys_chdir((const char __force_user *)"/");
80867 ROOT_DEV = new_decode_dev(real_root_dev);
80868 mount_root();
80869
80870 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
80871 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
80872 + error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
80873 if (!error)
80874 printk("okay\n");
80875 else {
80876 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
80877 + int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
80878 if (error == -ENOENT)
80879 printk("/initrd does not exist. Ignored.\n");
80880 else
80881 printk("failed\n");
80882 printk(KERN_NOTICE "Unmounting old root\n");
80883 - sys_umount("/old", MNT_DETACH);
80884 + sys_umount((char __force_user *)"/old", MNT_DETACH);
80885 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
80886 if (fd < 0) {
80887 error = fd;
80888 @@ -127,11 +127,11 @@ int __init initrd_load(void)
80889 * mounted in the normal path.
80890 */
80891 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
80892 - sys_unlink("/initrd.image");
80893 + sys_unlink((const char __force_user *)"/initrd.image");
80894 handle_initrd();
80895 return 1;
80896 }
80897 }
80898 - sys_unlink("/initrd.image");
80899 + sys_unlink((const char __force_user *)"/initrd.image");
80900 return 0;
80901 }
80902 diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
80903 index 8cb6db5..d729f50 100644
80904 --- a/init/do_mounts_md.c
80905 +++ b/init/do_mounts_md.c
80906 @@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
80907 partitioned ? "_d" : "", minor,
80908 md_setup_args[ent].device_names);
80909
80910 - fd = sys_open(name, 0, 0);
80911 + fd = sys_open((char __force_user *)name, 0, 0);
80912 if (fd < 0) {
80913 printk(KERN_ERR "md: open failed - cannot start "
80914 "array %s\n", name);
80915 @@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
80916 * array without it
80917 */
80918 sys_close(fd);
80919 - fd = sys_open(name, 0, 0);
80920 + fd = sys_open((char __force_user *)name, 0, 0);
80921 sys_ioctl(fd, BLKRRPART, 0);
80922 }
80923 sys_close(fd);
80924 @@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
80925
80926 wait_for_device_probe();
80927
80928 - fd = sys_open("/dev/md0", 0, 0);
80929 + fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
80930 if (fd >= 0) {
80931 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
80932 sys_close(fd);
80933 diff --git a/init/init_task.c b/init/init_task.c
80934 index ba0a7f36..2bcf1d5 100644
80935 --- a/init/init_task.c
80936 +++ b/init/init_task.c
80937 @@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
80938 * Initial thread structure. Alignment of this is handled by a special
80939 * linker map entry.
80940 */
80941 +#ifdef CONFIG_X86
80942 +union thread_union init_thread_union __init_task_data;
80943 +#else
80944 union thread_union init_thread_union __init_task_data =
80945 { INIT_THREAD_INFO(init_task) };
80946 +#endif
80947 diff --git a/init/initramfs.c b/init/initramfs.c
80948 index a67ef9d..2d17ed9 100644
80949 --- a/init/initramfs.c
80950 +++ b/init/initramfs.c
80951 @@ -84,7 +84,7 @@ static void __init free_hash(void)
80952 }
80953 }
80954
80955 -static long __init do_utime(char *filename, time_t mtime)
80956 +static long __init do_utime(char __force_user *filename, time_t mtime)
80957 {
80958 struct timespec t[2];
80959
80960 @@ -119,7 +119,7 @@ static void __init dir_utime(void)
80961 struct dir_entry *de, *tmp;
80962 list_for_each_entry_safe(de, tmp, &dir_list, list) {
80963 list_del(&de->list);
80964 - do_utime(de->name, de->mtime);
80965 + do_utime((char __force_user *)de->name, de->mtime);
80966 kfree(de->name);
80967 kfree(de);
80968 }
80969 @@ -281,7 +281,7 @@ static int __init maybe_link(void)
80970 if (nlink >= 2) {
80971 char *old = find_link(major, minor, ino, mode, collected);
80972 if (old)
80973 - return (sys_link(old, collected) < 0) ? -1 : 1;
80974 + return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
80975 }
80976 return 0;
80977 }
80978 @@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
80979 {
80980 struct stat st;
80981
80982 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
80983 + if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
80984 if (S_ISDIR(st.st_mode))
80985 - sys_rmdir(path);
80986 + sys_rmdir((char __force_user *)path);
80987 else
80988 - sys_unlink(path);
80989 + sys_unlink((char __force_user *)path);
80990 }
80991 }
80992
80993 @@ -315,7 +315,7 @@ static int __init do_name(void)
80994 int openflags = O_WRONLY|O_CREAT;
80995 if (ml != 1)
80996 openflags |= O_TRUNC;
80997 - wfd = sys_open(collected, openflags, mode);
80998 + wfd = sys_open((char __force_user *)collected, openflags, mode);
80999
81000 if (wfd >= 0) {
81001 sys_fchown(wfd, uid, gid);
81002 @@ -327,17 +327,17 @@ static int __init do_name(void)
81003 }
81004 }
81005 } else if (S_ISDIR(mode)) {
81006 - sys_mkdir(collected, mode);
81007 - sys_chown(collected, uid, gid);
81008 - sys_chmod(collected, mode);
81009 + sys_mkdir((char __force_user *)collected, mode);
81010 + sys_chown((char __force_user *)collected, uid, gid);
81011 + sys_chmod((char __force_user *)collected, mode);
81012 dir_add(collected, mtime);
81013 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
81014 S_ISFIFO(mode) || S_ISSOCK(mode)) {
81015 if (maybe_link() == 0) {
81016 - sys_mknod(collected, mode, rdev);
81017 - sys_chown(collected, uid, gid);
81018 - sys_chmod(collected, mode);
81019 - do_utime(collected, mtime);
81020 + sys_mknod((char __force_user *)collected, mode, rdev);
81021 + sys_chown((char __force_user *)collected, uid, gid);
81022 + sys_chmod((char __force_user *)collected, mode);
81023 + do_utime((char __force_user *)collected, mtime);
81024 }
81025 }
81026 return 0;
81027 @@ -346,15 +346,15 @@ static int __init do_name(void)
81028 static int __init do_copy(void)
81029 {
81030 if (count >= body_len) {
81031 - sys_write(wfd, victim, body_len);
81032 + sys_write(wfd, (char __force_user *)victim, body_len);
81033 sys_close(wfd);
81034 - do_utime(vcollected, mtime);
81035 + do_utime((char __force_user *)vcollected, mtime);
81036 kfree(vcollected);
81037 eat(body_len);
81038 state = SkipIt;
81039 return 0;
81040 } else {
81041 - sys_write(wfd, victim, count);
81042 + sys_write(wfd, (char __force_user *)victim, count);
81043 body_len -= count;
81044 eat(count);
81045 return 1;
81046 @@ -365,9 +365,9 @@ static int __init do_symlink(void)
81047 {
81048 collected[N_ALIGN(name_len) + body_len] = '\0';
81049 clean_path(collected, 0);
81050 - sys_symlink(collected + N_ALIGN(name_len), collected);
81051 - sys_lchown(collected, uid, gid);
81052 - do_utime(collected, mtime);
81053 + sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
81054 + sys_lchown((char __force_user *)collected, uid, gid);
81055 + do_utime((char __force_user *)collected, mtime);
81056 state = SkipIt;
81057 next_state = Reset;
81058 return 0;
81059 @@ -583,7 +583,7 @@ static int __init populate_rootfs(void)
81060 {
81061 char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
81062 if (err)
81063 - panic(err); /* Failed to decompress INTERNAL initramfs */
81064 + panic("%s", err); /* Failed to decompress INTERNAL initramfs */
81065 if (initrd_start) {
81066 #ifdef CONFIG_BLK_DEV_RAM
81067 int fd;
81068 diff --git a/init/main.c b/init/main.c
81069 index 63d3e8f..50bd5f8 100644
81070 --- a/init/main.c
81071 +++ b/init/main.c
81072 @@ -103,6 +103,8 @@ static inline void mark_rodata_ro(void) { }
81073 extern void tc_init(void);
81074 #endif
81075
81076 +extern void grsecurity_init(void);
81077 +
81078 /*
81079 * Debug helper: via this flag we know that we are in 'early bootup code'
81080 * where only the boot processor is running with IRQ disabled. This means
81081 @@ -156,6 +158,75 @@ static int __init set_reset_devices(char *str)
81082
81083 __setup("reset_devices", set_reset_devices);
81084
81085 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
81086 +kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
81087 +static int __init setup_grsec_proc_gid(char *str)
81088 +{
81089 + grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
81090 + return 1;
81091 +}
81092 +__setup("grsec_proc_gid=", setup_grsec_proc_gid);
81093 +#endif
81094 +
81095 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
81096 +unsigned long pax_user_shadow_base __read_only;
81097 +EXPORT_SYMBOL(pax_user_shadow_base);
81098 +extern char pax_enter_kernel_user[];
81099 +extern char pax_exit_kernel_user[];
81100 +#endif
81101 +
81102 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
81103 +static int __init setup_pax_nouderef(char *str)
81104 +{
81105 +#ifdef CONFIG_X86_32
81106 + unsigned int cpu;
81107 + struct desc_struct *gdt;
81108 +
81109 + for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
81110 + gdt = get_cpu_gdt_table(cpu);
81111 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
81112 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
81113 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
81114 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
81115 + }
81116 + loadsegment(ds, __KERNEL_DS);
81117 + loadsegment(es, __KERNEL_DS);
81118 + loadsegment(ss, __KERNEL_DS);
81119 +#else
81120 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
81121 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
81122 + clone_pgd_mask = ~(pgdval_t)0UL;
81123 + pax_user_shadow_base = 0UL;
81124 + setup_clear_cpu_cap(X86_FEATURE_PCID);
81125 + setup_clear_cpu_cap(X86_FEATURE_INVPCID);
81126 +#endif
81127 +
81128 + return 0;
81129 +}
81130 +early_param("pax_nouderef", setup_pax_nouderef);
81131 +
81132 +#ifdef CONFIG_X86_64
81133 +static int __init setup_pax_weakuderef(char *str)
81134 +{
81135 + if (clone_pgd_mask != ~(pgdval_t)0UL)
81136 + pax_user_shadow_base = 1UL << TASK_SIZE_MAX_SHIFT;
81137 + return 1;
81138 +}
81139 +__setup("pax_weakuderef", setup_pax_weakuderef);
81140 +#endif
81141 +#endif
81142 +
81143 +#ifdef CONFIG_PAX_SOFTMODE
81144 +int pax_softmode;
81145 +
81146 +static int __init setup_pax_softmode(char *str)
81147 +{
81148 + get_option(&str, &pax_softmode);
81149 + return 1;
81150 +}
81151 +__setup("pax_softmode=", setup_pax_softmode);
81152 +#endif
81153 +
81154 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
81155 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
81156 static const char *panic_later, *panic_param;
81157 @@ -682,25 +753,24 @@ int __init_or_module do_one_initcall(initcall_t fn)
81158 {
81159 int count = preempt_count();
81160 int ret;
81161 - char msgbuf[64];
81162 + const char *msg1 = "", *msg2 = "";
81163
81164 if (initcall_debug)
81165 ret = do_one_initcall_debug(fn);
81166 else
81167 ret = fn();
81168
81169 - msgbuf[0] = 0;
81170 -
81171 if (preempt_count() != count) {
81172 - sprintf(msgbuf, "preemption imbalance ");
81173 + msg1 = " preemption imbalance";
81174 preempt_count() = count;
81175 }
81176 if (irqs_disabled()) {
81177 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
81178 + msg2 = " disabled interrupts";
81179 local_irq_enable();
81180 }
81181 - WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
81182 + WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
81183
81184 + add_latent_entropy();
81185 return ret;
81186 }
81187
81188 @@ -807,10 +877,14 @@ static int run_init_process(const char *init_filename)
81189 {
81190 argv_init[0] = init_filename;
81191 return do_execve(init_filename,
81192 - (const char __user *const __user *)argv_init,
81193 - (const char __user *const __user *)envp_init);
81194 + (const char __user *const __force_user *)argv_init,
81195 + (const char __user *const __force_user *)envp_init);
81196 }
81197
81198 +#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
81199 +extern int gr_init_ran;
81200 +#endif
81201 +
81202 static noinline void __init kernel_init_freeable(void);
81203
81204 static int __ref kernel_init(void *unused)
81205 @@ -831,6 +905,11 @@ static int __ref kernel_init(void *unused)
81206 pr_err("Failed to execute %s\n", ramdisk_execute_command);
81207 }
81208
81209 +#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
81210 + /* if no initrd was used, be extra sure we enforce chroot restrictions */
81211 + gr_init_ran = 1;
81212 +#endif
81213 +
81214 /*
81215 * We try each of these until one succeeds.
81216 *
81217 @@ -885,7 +964,7 @@ static noinline void __init kernel_init_freeable(void)
81218 do_basic_setup();
81219
81220 /* Open the /dev/console on the rootfs, this should never fail */
81221 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
81222 + if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
81223 pr_err("Warning: unable to open an initial console.\n");
81224
81225 (void) sys_dup(0);
81226 @@ -898,11 +977,13 @@ static noinline void __init kernel_init_freeable(void)
81227 if (!ramdisk_execute_command)
81228 ramdisk_execute_command = "/init";
81229
81230 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
81231 + if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
81232 ramdisk_execute_command = NULL;
81233 prepare_namespace();
81234 }
81235
81236 + grsecurity_init();
81237 +
81238 /*
81239 * Ok, we have completed the initial bootup, and
81240 * we're essentially up and running. Get rid of the
81241 diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
81242 index b0e99de..09f385c 100644
81243 --- a/ipc/ipc_sysctl.c
81244 +++ b/ipc/ipc_sysctl.c
81245 @@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
81246 static int proc_ipc_dointvec(ctl_table *table, int write,
81247 void __user *buffer, size_t *lenp, loff_t *ppos)
81248 {
81249 - struct ctl_table ipc_table;
81250 + ctl_table_no_const ipc_table;
81251
81252 memcpy(&ipc_table, table, sizeof(ipc_table));
81253 ipc_table.data = get_ipc(table);
81254 @@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
81255 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
81256 void __user *buffer, size_t *lenp, loff_t *ppos)
81257 {
81258 - struct ctl_table ipc_table;
81259 + ctl_table_no_const ipc_table;
81260
81261 memcpy(&ipc_table, table, sizeof(ipc_table));
81262 ipc_table.data = get_ipc(table);
81263 @@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
81264 static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
81265 void __user *buffer, size_t *lenp, loff_t *ppos)
81266 {
81267 - struct ctl_table ipc_table;
81268 + ctl_table_no_const ipc_table;
81269 size_t lenp_bef = *lenp;
81270 int rc;
81271
81272 @@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec_minmax(ctl_table *table, int write,
81273 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
81274 void __user *buffer, size_t *lenp, loff_t *ppos)
81275 {
81276 - struct ctl_table ipc_table;
81277 + ctl_table_no_const ipc_table;
81278 memcpy(&ipc_table, table, sizeof(ipc_table));
81279 ipc_table.data = get_ipc(table);
81280
81281 @@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
81282 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
81283 void __user *buffer, size_t *lenp, loff_t *ppos)
81284 {
81285 - struct ctl_table ipc_table;
81286 + ctl_table_no_const ipc_table;
81287 size_t lenp_bef = *lenp;
81288 int oldval;
81289 int rc;
81290 diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
81291 index 383d638..943fdbb 100644
81292 --- a/ipc/mq_sysctl.c
81293 +++ b/ipc/mq_sysctl.c
81294 @@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
81295 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
81296 void __user *buffer, size_t *lenp, loff_t *ppos)
81297 {
81298 - struct ctl_table mq_table;
81299 + ctl_table_no_const mq_table;
81300 memcpy(&mq_table, table, sizeof(mq_table));
81301 mq_table.data = get_mq(table);
81302
81303 diff --git a/ipc/mqueue.c b/ipc/mqueue.c
81304 index ae1996d..a35f2cc 100644
81305 --- a/ipc/mqueue.c
81306 +++ b/ipc/mqueue.c
81307 @@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
81308 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
81309 info->attr.mq_msgsize);
81310
81311 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
81312 spin_lock(&mq_lock);
81313 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
81314 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
81315 diff --git a/ipc/msg.c b/ipc/msg.c
81316 index 558aa91..359e718 100644
81317 --- a/ipc/msg.c
81318 +++ b/ipc/msg.c
81319 @@ -297,18 +297,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
81320 return security_msg_queue_associate(msq, msgflg);
81321 }
81322
81323 +static struct ipc_ops msg_ops = {
81324 + .getnew = newque,
81325 + .associate = msg_security,
81326 + .more_checks = NULL
81327 +};
81328 +
81329 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
81330 {
81331 struct ipc_namespace *ns;
81332 - struct ipc_ops msg_ops;
81333 struct ipc_params msg_params;
81334
81335 ns = current->nsproxy->ipc_ns;
81336
81337 - msg_ops.getnew = newque;
81338 - msg_ops.associate = msg_security;
81339 - msg_ops.more_checks = NULL;
81340 -
81341 msg_params.key = key;
81342 msg_params.flg = msgflg;
81343
81344 diff --git a/ipc/sem.c b/ipc/sem.c
81345 index db9d241..bc8427c 100644
81346 --- a/ipc/sem.c
81347 +++ b/ipc/sem.c
81348 @@ -562,10 +562,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
81349 return 0;
81350 }
81351
81352 +static struct ipc_ops sem_ops = {
81353 + .getnew = newary,
81354 + .associate = sem_security,
81355 + .more_checks = sem_more_checks
81356 +};
81357 +
81358 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
81359 {
81360 struct ipc_namespace *ns;
81361 - struct ipc_ops sem_ops;
81362 struct ipc_params sem_params;
81363
81364 ns = current->nsproxy->ipc_ns;
81365 @@ -573,10 +578,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
81366 if (nsems < 0 || nsems > ns->sc_semmsl)
81367 return -EINVAL;
81368
81369 - sem_ops.getnew = newary;
81370 - sem_ops.associate = sem_security;
81371 - sem_ops.more_checks = sem_more_checks;
81372 -
81373 sem_params.key = key;
81374 sem_params.flg = semflg;
81375 sem_params.u.nsems = nsems;
81376 diff --git a/ipc/shm.c b/ipc/shm.c
81377 index 7a51443..3a257d8 100644
81378 --- a/ipc/shm.c
81379 +++ b/ipc/shm.c
81380 @@ -72,6 +72,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
81381 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
81382 #endif
81383
81384 +#ifdef CONFIG_GRKERNSEC
81385 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
81386 + const time_t shm_createtime, const kuid_t cuid,
81387 + const int shmid);
81388 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
81389 + const time_t shm_createtime);
81390 +#endif
81391 +
81392 void shm_init_ns(struct ipc_namespace *ns)
81393 {
81394 ns->shm_ctlmax = SHMMAX;
81395 @@ -554,6 +562,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
81396 shp->shm_lprid = 0;
81397 shp->shm_atim = shp->shm_dtim = 0;
81398 shp->shm_ctim = get_seconds();
81399 +#ifdef CONFIG_GRKERNSEC
81400 + {
81401 + struct timespec timeval;
81402 + do_posix_clock_monotonic_gettime(&timeval);
81403 +
81404 + shp->shm_createtime = timeval.tv_sec;
81405 + }
81406 +#endif
81407 shp->shm_segsz = size;
81408 shp->shm_nattch = 0;
81409 shp->shm_file = file;
81410 @@ -607,18 +623,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
81411 return 0;
81412 }
81413
81414 +static struct ipc_ops shm_ops = {
81415 + .getnew = newseg,
81416 + .associate = shm_security,
81417 + .more_checks = shm_more_checks
81418 +};
81419 +
81420 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
81421 {
81422 struct ipc_namespace *ns;
81423 - struct ipc_ops shm_ops;
81424 struct ipc_params shm_params;
81425
81426 ns = current->nsproxy->ipc_ns;
81427
81428 - shm_ops.getnew = newseg;
81429 - shm_ops.associate = shm_security;
81430 - shm_ops.more_checks = shm_more_checks;
81431 -
81432 shm_params.key = key;
81433 shm_params.flg = shmflg;
81434 shm_params.u.size = size;
81435 @@ -1089,6 +1106,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
81436 f_mode = FMODE_READ | FMODE_WRITE;
81437 }
81438 if (shmflg & SHM_EXEC) {
81439 +
81440 +#ifdef CONFIG_PAX_MPROTECT
81441 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
81442 + goto out;
81443 +#endif
81444 +
81445 prot |= PROT_EXEC;
81446 acc_mode |= S_IXUGO;
81447 }
81448 @@ -1113,6 +1136,15 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
81449 if (err)
81450 goto out_unlock;
81451
81452 +#ifdef CONFIG_GRKERNSEC
81453 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
81454 + shp->shm_perm.cuid, shmid) ||
81455 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
81456 + err = -EACCES;
81457 + goto out_unlock;
81458 + }
81459 +#endif
81460 +
81461 ipc_lock_object(&shp->shm_perm);
81462
81463 /* check if shm_destroy() is tearing down shp */
81464 @@ -1125,6 +1157,9 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
81465 path = shp->shm_file->f_path;
81466 path_get(&path);
81467 shp->shm_nattch++;
81468 +#ifdef CONFIG_GRKERNSEC
81469 + shp->shm_lapid = current->pid;
81470 +#endif
81471 size = i_size_read(path.dentry->d_inode);
81472 ipc_unlock_object(&shp->shm_perm);
81473 rcu_read_unlock();
81474 diff --git a/ipc/util.c b/ipc/util.c
81475 index 7684f41..f7da711 100644
81476 --- a/ipc/util.c
81477 +++ b/ipc/util.c
81478 @@ -71,6 +71,8 @@ struct ipc_proc_iface {
81479 int (*show)(struct seq_file *, void *);
81480 };
81481
81482 +extern int gr_ipc_permitted(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, int requested_mode, int granted_mode);
81483 +
81484 static void ipc_memory_notifier(struct work_struct *work)
81485 {
81486 ipcns_notify(IPCNS_MEMCHANGED);
81487 @@ -560,6 +562,9 @@ int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
81488 granted_mode >>= 6;
81489 else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
81490 granted_mode >>= 3;
81491 + else if (!gr_ipc_permitted(ns, ipcp, requested_mode, granted_mode))
81492 + return -1;
81493 +
81494 /* is there some bit set in requested_mode but not in granted_mode? */
81495 if ((requested_mode & ~granted_mode & 0007) &&
81496 !ns_capable(ns->user_ns, CAP_IPC_OWNER))
81497 diff --git a/kernel/acct.c b/kernel/acct.c
81498 index 8d6e145..33e0b1e 100644
81499 --- a/kernel/acct.c
81500 +++ b/kernel/acct.c
81501 @@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
81502 */
81503 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
81504 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
81505 - file->f_op->write(file, (char *)&ac,
81506 + file->f_op->write(file, (char __force_user *)&ac,
81507 sizeof(acct_t), &file->f_pos);
81508 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
81509 set_fs(fs);
81510 diff --git a/kernel/audit.c b/kernel/audit.c
81511 index 7ddfd8a..49766eb 100644
81512 --- a/kernel/audit.c
81513 +++ b/kernel/audit.c
81514 @@ -118,7 +118,7 @@ u32 audit_sig_sid = 0;
81515 3) suppressed due to audit_rate_limit
81516 4) suppressed due to audit_backlog_limit
81517 */
81518 -static atomic_t audit_lost = ATOMIC_INIT(0);
81519 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
81520
81521 /* The netlink socket. */
81522 static struct sock *audit_sock;
81523 @@ -240,7 +240,7 @@ void audit_log_lost(const char *message)
81524 unsigned long now;
81525 int print;
81526
81527 - atomic_inc(&audit_lost);
81528 + atomic_inc_unchecked(&audit_lost);
81529
81530 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
81531
81532 @@ -259,7 +259,7 @@ void audit_log_lost(const char *message)
81533 printk(KERN_WARNING
81534 "audit: audit_lost=%d audit_rate_limit=%d "
81535 "audit_backlog_limit=%d\n",
81536 - atomic_read(&audit_lost),
81537 + atomic_read_unchecked(&audit_lost),
81538 audit_rate_limit,
81539 audit_backlog_limit);
81540 audit_panic(message);
81541 @@ -665,7 +665,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
81542 status_set.pid = audit_pid;
81543 status_set.rate_limit = audit_rate_limit;
81544 status_set.backlog_limit = audit_backlog_limit;
81545 - status_set.lost = atomic_read(&audit_lost);
81546 + status_set.lost = atomic_read_unchecked(&audit_lost);
81547 status_set.backlog = skb_queue_len(&audit_skb_queue);
81548 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
81549 &status_set, sizeof(status_set));
81550 @@ -1252,7 +1252,7 @@ void audit_log_n_hex(struct audit_buffer *ab, const unsigned char *buf,
81551 int i, avail, new_len;
81552 unsigned char *ptr;
81553 struct sk_buff *skb;
81554 - static const unsigned char *hex = "0123456789ABCDEF";
81555 + static const unsigned char hex[] = "0123456789ABCDEF";
81556
81557 if (!ab)
81558 return;
81559 diff --git a/kernel/auditsc.c b/kernel/auditsc.c
81560 index 9845cb3..3ec9369 100644
81561 --- a/kernel/auditsc.c
81562 +++ b/kernel/auditsc.c
81563 @@ -1962,7 +1962,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
81564 }
81565
81566 /* global counter which is incremented every time something logs in */
81567 -static atomic_t session_id = ATOMIC_INIT(0);
81568 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
81569
81570 /**
81571 * audit_set_loginuid - set current task's audit_context loginuid
81572 @@ -1986,7 +1986,7 @@ int audit_set_loginuid(kuid_t loginuid)
81573 return -EPERM;
81574 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
81575
81576 - sessionid = atomic_inc_return(&session_id);
81577 + sessionid = atomic_inc_return_unchecked(&session_id);
81578 if (context && context->in_syscall) {
81579 struct audit_buffer *ab;
81580
81581 diff --git a/kernel/capability.c b/kernel/capability.c
81582 index 4e66bf9..cdccecf 100644
81583 --- a/kernel/capability.c
81584 +++ b/kernel/capability.c
81585 @@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
81586 * before modification is attempted and the application
81587 * fails.
81588 */
81589 + if (tocopy > ARRAY_SIZE(kdata))
81590 + return -EFAULT;
81591 +
81592 if (copy_to_user(dataptr, kdata, tocopy
81593 * sizeof(struct __user_cap_data_struct))) {
81594 return -EFAULT;
81595 @@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
81596 int ret;
81597
81598 rcu_read_lock();
81599 - ret = security_capable(__task_cred(t), ns, cap);
81600 + ret = security_capable(__task_cred(t), ns, cap) == 0 &&
81601 + gr_task_is_capable(t, __task_cred(t), cap);
81602 rcu_read_unlock();
81603
81604 - return (ret == 0);
81605 + return ret;
81606 }
81607
81608 /**
81609 @@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
81610 int ret;
81611
81612 rcu_read_lock();
81613 - ret = security_capable_noaudit(__task_cred(t), ns, cap);
81614 + ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
81615 rcu_read_unlock();
81616
81617 - return (ret == 0);
81618 + return ret;
81619 }
81620
81621 /**
81622 @@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
81623 BUG();
81624 }
81625
81626 - if (security_capable(current_cred(), ns, cap) == 0) {
81627 + if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
81628 current->flags |= PF_SUPERPRIV;
81629 return true;
81630 }
81631 @@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
81632 }
81633 EXPORT_SYMBOL(ns_capable);
81634
81635 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
81636 +{
81637 + if (unlikely(!cap_valid(cap))) {
81638 + printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
81639 + BUG();
81640 + }
81641 +
81642 + if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
81643 + current->flags |= PF_SUPERPRIV;
81644 + return true;
81645 + }
81646 + return false;
81647 +}
81648 +EXPORT_SYMBOL(ns_capable_nolog);
81649 +
81650 /**
81651 * file_ns_capable - Determine if the file's opener had a capability in effect
81652 * @file: The file we want to check
81653 @@ -432,6 +451,12 @@ bool capable(int cap)
81654 }
81655 EXPORT_SYMBOL(capable);
81656
81657 +bool capable_nolog(int cap)
81658 +{
81659 + return ns_capable_nolog(&init_user_ns, cap);
81660 +}
81661 +EXPORT_SYMBOL(capable_nolog);
81662 +
81663 /**
81664 * inode_capable - Check superior capability over inode
81665 * @inode: The inode in question
81666 @@ -453,3 +478,11 @@ bool inode_capable(const struct inode *inode, int cap)
81667 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
81668 }
81669 EXPORT_SYMBOL(inode_capable);
81670 +
81671 +bool inode_capable_nolog(const struct inode *inode, int cap)
81672 +{
81673 + struct user_namespace *ns = current_user_ns();
81674 +
81675 + return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
81676 +}
81677 +EXPORT_SYMBOL(inode_capable_nolog);
81678 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
81679 index 5c9127d..f871169 100644
81680 --- a/kernel/cgroup.c
81681 +++ b/kernel/cgroup.c
81682 @@ -5844,7 +5844,7 @@ static int cgroup_css_links_read(struct cgroup_subsys_state *css,
81683 struct css_set *cset = link->cset;
81684 struct task_struct *task;
81685 int count = 0;
81686 - seq_printf(seq, "css_set %p\n", cset);
81687 + seq_printf(seq, "css_set %pK\n", cset);
81688 list_for_each_entry(task, &cset->tasks, cg_list) {
81689 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
81690 seq_puts(seq, " ...\n");
81691 diff --git a/kernel/compat.c b/kernel/compat.c
81692 index 0a09e48..f44f3f0 100644
81693 --- a/kernel/compat.c
81694 +++ b/kernel/compat.c
81695 @@ -13,6 +13,7 @@
81696
81697 #include <linux/linkage.h>
81698 #include <linux/compat.h>
81699 +#include <linux/module.h>
81700 #include <linux/errno.h>
81701 #include <linux/time.h>
81702 #include <linux/signal.h>
81703 @@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
81704 mm_segment_t oldfs;
81705 long ret;
81706
81707 - restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
81708 + restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
81709 oldfs = get_fs();
81710 set_fs(KERNEL_DS);
81711 ret = hrtimer_nanosleep_restart(restart);
81712 @@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
81713 oldfs = get_fs();
81714 set_fs(KERNEL_DS);
81715 ret = hrtimer_nanosleep(&tu,
81716 - rmtp ? (struct timespec __user *)&rmt : NULL,
81717 + rmtp ? (struct timespec __force_user *)&rmt : NULL,
81718 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
81719 set_fs(oldfs);
81720
81721 @@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
81722 mm_segment_t old_fs = get_fs();
81723
81724 set_fs(KERNEL_DS);
81725 - ret = sys_sigpending((old_sigset_t __user *) &s);
81726 + ret = sys_sigpending((old_sigset_t __force_user *) &s);
81727 set_fs(old_fs);
81728 if (ret == 0)
81729 ret = put_user(s, set);
81730 @@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
81731 mm_segment_t old_fs = get_fs();
81732
81733 set_fs(KERNEL_DS);
81734 - ret = sys_old_getrlimit(resource, &r);
81735 + ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
81736 set_fs(old_fs);
81737
81738 if (!ret) {
81739 @@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
81740 set_fs (KERNEL_DS);
81741 ret = sys_wait4(pid,
81742 (stat_addr ?
81743 - (unsigned int __user *) &status : NULL),
81744 - options, (struct rusage __user *) &r);
81745 + (unsigned int __force_user *) &status : NULL),
81746 + options, (struct rusage __force_user *) &r);
81747 set_fs (old_fs);
81748
81749 if (ret > 0) {
81750 @@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
81751 memset(&info, 0, sizeof(info));
81752
81753 set_fs(KERNEL_DS);
81754 - ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
81755 - uru ? (struct rusage __user *)&ru : NULL);
81756 + ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
81757 + uru ? (struct rusage __force_user *)&ru : NULL);
81758 set_fs(old_fs);
81759
81760 if ((ret < 0) || (info.si_signo == 0))
81761 @@ -695,8 +696,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
81762 oldfs = get_fs();
81763 set_fs(KERNEL_DS);
81764 err = sys_timer_settime(timer_id, flags,
81765 - (struct itimerspec __user *) &newts,
81766 - (struct itimerspec __user *) &oldts);
81767 + (struct itimerspec __force_user *) &newts,
81768 + (struct itimerspec __force_user *) &oldts);
81769 set_fs(oldfs);
81770 if (!err && old && put_compat_itimerspec(old, &oldts))
81771 return -EFAULT;
81772 @@ -713,7 +714,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
81773 oldfs = get_fs();
81774 set_fs(KERNEL_DS);
81775 err = sys_timer_gettime(timer_id,
81776 - (struct itimerspec __user *) &ts);
81777 + (struct itimerspec __force_user *) &ts);
81778 set_fs(oldfs);
81779 if (!err && put_compat_itimerspec(setting, &ts))
81780 return -EFAULT;
81781 @@ -732,7 +733,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
81782 oldfs = get_fs();
81783 set_fs(KERNEL_DS);
81784 err = sys_clock_settime(which_clock,
81785 - (struct timespec __user *) &ts);
81786 + (struct timespec __force_user *) &ts);
81787 set_fs(oldfs);
81788 return err;
81789 }
81790 @@ -747,7 +748,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
81791 oldfs = get_fs();
81792 set_fs(KERNEL_DS);
81793 err = sys_clock_gettime(which_clock,
81794 - (struct timespec __user *) &ts);
81795 + (struct timespec __force_user *) &ts);
81796 set_fs(oldfs);
81797 if (!err && put_compat_timespec(&ts, tp))
81798 return -EFAULT;
81799 @@ -767,7 +768,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
81800
81801 oldfs = get_fs();
81802 set_fs(KERNEL_DS);
81803 - ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
81804 + ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
81805 set_fs(oldfs);
81806
81807 err = compat_put_timex(utp, &txc);
81808 @@ -787,7 +788,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
81809 oldfs = get_fs();
81810 set_fs(KERNEL_DS);
81811 err = sys_clock_getres(which_clock,
81812 - (struct timespec __user *) &ts);
81813 + (struct timespec __force_user *) &ts);
81814 set_fs(oldfs);
81815 if (!err && tp && put_compat_timespec(&ts, tp))
81816 return -EFAULT;
81817 @@ -799,9 +800,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
81818 long err;
81819 mm_segment_t oldfs;
81820 struct timespec tu;
81821 - struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
81822 + struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
81823
81824 - restart->nanosleep.rmtp = (struct timespec __user *) &tu;
81825 + restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
81826 oldfs = get_fs();
81827 set_fs(KERNEL_DS);
81828 err = clock_nanosleep_restart(restart);
81829 @@ -833,8 +834,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
81830 oldfs = get_fs();
81831 set_fs(KERNEL_DS);
81832 err = sys_clock_nanosleep(which_clock, flags,
81833 - (struct timespec __user *) &in,
81834 - (struct timespec __user *) &out);
81835 + (struct timespec __force_user *) &in,
81836 + (struct timespec __force_user *) &out);
81837 set_fs(oldfs);
81838
81839 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
81840 diff --git a/kernel/configs.c b/kernel/configs.c
81841 index c18b1f1..b9a0132 100644
81842 --- a/kernel/configs.c
81843 +++ b/kernel/configs.c
81844 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
81845 struct proc_dir_entry *entry;
81846
81847 /* create the current config file */
81848 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
81849 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
81850 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
81851 + &ikconfig_file_ops);
81852 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
81853 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
81854 + &ikconfig_file_ops);
81855 +#endif
81856 +#else
81857 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
81858 &ikconfig_file_ops);
81859 +#endif
81860 +
81861 if (!entry)
81862 return -ENOMEM;
81863
81864 diff --git a/kernel/cred.c b/kernel/cred.c
81865 index e0573a4..3874e41 100644
81866 --- a/kernel/cred.c
81867 +++ b/kernel/cred.c
81868 @@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
81869 validate_creds(cred);
81870 alter_cred_subscribers(cred, -1);
81871 put_cred(cred);
81872 +
81873 +#ifdef CONFIG_GRKERNSEC_SETXID
81874 + cred = (struct cred *) tsk->delayed_cred;
81875 + if (cred != NULL) {
81876 + tsk->delayed_cred = NULL;
81877 + validate_creds(cred);
81878 + alter_cred_subscribers(cred, -1);
81879 + put_cred(cred);
81880 + }
81881 +#endif
81882 }
81883
81884 /**
81885 @@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
81886 * Always returns 0 thus allowing this function to be tail-called at the end
81887 * of, say, sys_setgid().
81888 */
81889 -int commit_creds(struct cred *new)
81890 +static int __commit_creds(struct cred *new)
81891 {
81892 struct task_struct *task = current;
81893 const struct cred *old = task->real_cred;
81894 @@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
81895
81896 get_cred(new); /* we will require a ref for the subj creds too */
81897
81898 + gr_set_role_label(task, new->uid, new->gid);
81899 +
81900 /* dumpability changes */
81901 if (!uid_eq(old->euid, new->euid) ||
81902 !gid_eq(old->egid, new->egid) ||
81903 @@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
81904 put_cred(old);
81905 return 0;
81906 }
81907 +#ifdef CONFIG_GRKERNSEC_SETXID
81908 +extern int set_user(struct cred *new);
81909 +
81910 +void gr_delayed_cred_worker(void)
81911 +{
81912 + const struct cred *new = current->delayed_cred;
81913 + struct cred *ncred;
81914 +
81915 + current->delayed_cred = NULL;
81916 +
81917 + if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
81918 + // from doing get_cred on it when queueing this
81919 + put_cred(new);
81920 + return;
81921 + } else if (new == NULL)
81922 + return;
81923 +
81924 + ncred = prepare_creds();
81925 + if (!ncred)
81926 + goto die;
81927 + // uids
81928 + ncred->uid = new->uid;
81929 + ncred->euid = new->euid;
81930 + ncred->suid = new->suid;
81931 + ncred->fsuid = new->fsuid;
81932 + // gids
81933 + ncred->gid = new->gid;
81934 + ncred->egid = new->egid;
81935 + ncred->sgid = new->sgid;
81936 + ncred->fsgid = new->fsgid;
81937 + // groups
81938 + if (set_groups(ncred, new->group_info) < 0) {
81939 + abort_creds(ncred);
81940 + goto die;
81941 + }
81942 + // caps
81943 + ncred->securebits = new->securebits;
81944 + ncred->cap_inheritable = new->cap_inheritable;
81945 + ncred->cap_permitted = new->cap_permitted;
81946 + ncred->cap_effective = new->cap_effective;
81947 + ncred->cap_bset = new->cap_bset;
81948 +
81949 + if (set_user(ncred)) {
81950 + abort_creds(ncred);
81951 + goto die;
81952 + }
81953 +
81954 + // from doing get_cred on it when queueing this
81955 + put_cred(new);
81956 +
81957 + __commit_creds(ncred);
81958 + return;
81959 +die:
81960 + // from doing get_cred on it when queueing this
81961 + put_cred(new);
81962 + do_group_exit(SIGKILL);
81963 +}
81964 +#endif
81965 +
81966 +int commit_creds(struct cred *new)
81967 +{
81968 +#ifdef CONFIG_GRKERNSEC_SETXID
81969 + int ret;
81970 + int schedule_it = 0;
81971 + struct task_struct *t;
81972 +
81973 + /* we won't get called with tasklist_lock held for writing
81974 + and interrupts disabled as the cred struct in that case is
81975 + init_cred
81976 + */
81977 + if (grsec_enable_setxid && !current_is_single_threaded() &&
81978 + uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
81979 + !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
81980 + schedule_it = 1;
81981 + }
81982 + ret = __commit_creds(new);
81983 + if (schedule_it) {
81984 + rcu_read_lock();
81985 + read_lock(&tasklist_lock);
81986 + for (t = next_thread(current); t != current;
81987 + t = next_thread(t)) {
81988 + if (t->delayed_cred == NULL) {
81989 + t->delayed_cred = get_cred(new);
81990 + set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
81991 + set_tsk_need_resched(t);
81992 + }
81993 + }
81994 + read_unlock(&tasklist_lock);
81995 + rcu_read_unlock();
81996 + }
81997 + return ret;
81998 +#else
81999 + return __commit_creds(new);
82000 +#endif
82001 +}
82002 +
82003 EXPORT_SYMBOL(commit_creds);
82004
82005 /**
82006 diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
82007 index 0506d44..2c20034 100644
82008 --- a/kernel/debug/debug_core.c
82009 +++ b/kernel/debug/debug_core.c
82010 @@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
82011 */
82012 static atomic_t masters_in_kgdb;
82013 static atomic_t slaves_in_kgdb;
82014 -static atomic_t kgdb_break_tasklet_var;
82015 +static atomic_unchecked_t kgdb_break_tasklet_var;
82016 atomic_t kgdb_setting_breakpoint;
82017
82018 struct task_struct *kgdb_usethread;
82019 @@ -133,7 +133,7 @@ int kgdb_single_step;
82020 static pid_t kgdb_sstep_pid;
82021
82022 /* to keep track of the CPU which is doing the single stepping*/
82023 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
82024 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
82025
82026 /*
82027 * If you are debugging a problem where roundup (the collection of
82028 @@ -541,7 +541,7 @@ return_normal:
82029 * kernel will only try for the value of sstep_tries before
82030 * giving up and continuing on.
82031 */
82032 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
82033 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
82034 (kgdb_info[cpu].task &&
82035 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
82036 atomic_set(&kgdb_active, -1);
82037 @@ -635,8 +635,8 @@ cpu_master_loop:
82038 }
82039
82040 kgdb_restore:
82041 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
82042 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
82043 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
82044 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
82045 if (kgdb_info[sstep_cpu].task)
82046 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
82047 else
82048 @@ -888,18 +888,18 @@ static void kgdb_unregister_callbacks(void)
82049 static void kgdb_tasklet_bpt(unsigned long ing)
82050 {
82051 kgdb_breakpoint();
82052 - atomic_set(&kgdb_break_tasklet_var, 0);
82053 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
82054 }
82055
82056 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
82057
82058 void kgdb_schedule_breakpoint(void)
82059 {
82060 - if (atomic_read(&kgdb_break_tasklet_var) ||
82061 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
82062 atomic_read(&kgdb_active) != -1 ||
82063 atomic_read(&kgdb_setting_breakpoint))
82064 return;
82065 - atomic_inc(&kgdb_break_tasklet_var);
82066 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
82067 tasklet_schedule(&kgdb_tasklet_breakpoint);
82068 }
82069 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
82070 diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
82071 index 00eb8f7..d7e3244 100644
82072 --- a/kernel/debug/kdb/kdb_main.c
82073 +++ b/kernel/debug/kdb/kdb_main.c
82074 @@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
82075 continue;
82076
82077 kdb_printf("%-20s%8u 0x%p ", mod->name,
82078 - mod->core_size, (void *)mod);
82079 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
82080 #ifdef CONFIG_MODULE_UNLOAD
82081 kdb_printf("%4ld ", module_refcount(mod));
82082 #endif
82083 @@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
82084 kdb_printf(" (Loading)");
82085 else
82086 kdb_printf(" (Live)");
82087 - kdb_printf(" 0x%p", mod->module_core);
82088 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
82089
82090 #ifdef CONFIG_MODULE_UNLOAD
82091 {
82092 diff --git a/kernel/events/core.c b/kernel/events/core.c
82093 index 953c143..5646bb1 100644
82094 --- a/kernel/events/core.c
82095 +++ b/kernel/events/core.c
82096 @@ -157,8 +157,15 @@ static struct srcu_struct pmus_srcu;
82097 * 0 - disallow raw tracepoint access for unpriv
82098 * 1 - disallow cpu events for unpriv
82099 * 2 - disallow kernel profiling for unpriv
82100 + * 3 - disallow all unpriv perf event use
82101 */
82102 -int sysctl_perf_event_paranoid __read_mostly = 1;
82103 +#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
82104 +int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
82105 +#elif defined(CONFIG_GRKERNSEC_HIDESYM)
82106 +int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
82107 +#else
82108 +int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
82109 +#endif
82110
82111 /* Minimum for 512 kiB + 1 user control page */
82112 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
82113 @@ -271,7 +278,7 @@ void perf_sample_event_took(u64 sample_len_ns)
82114 update_perf_cpu_limits();
82115 }
82116
82117 -static atomic64_t perf_event_id;
82118 +static atomic64_unchecked_t perf_event_id;
82119
82120 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
82121 enum event_type_t event_type);
82122 @@ -2940,7 +2947,7 @@ static void __perf_event_read(void *info)
82123
82124 static inline u64 perf_event_count(struct perf_event *event)
82125 {
82126 - return local64_read(&event->count) + atomic64_read(&event->child_count);
82127 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
82128 }
82129
82130 static u64 perf_event_read(struct perf_event *event)
82131 @@ -3308,9 +3315,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
82132 mutex_lock(&event->child_mutex);
82133 total += perf_event_read(event);
82134 *enabled += event->total_time_enabled +
82135 - atomic64_read(&event->child_total_time_enabled);
82136 + atomic64_read_unchecked(&event->child_total_time_enabled);
82137 *running += event->total_time_running +
82138 - atomic64_read(&event->child_total_time_running);
82139 + atomic64_read_unchecked(&event->child_total_time_running);
82140
82141 list_for_each_entry(child, &event->child_list, child_list) {
82142 total += perf_event_read(child);
82143 @@ -3725,10 +3732,10 @@ void perf_event_update_userpage(struct perf_event *event)
82144 userpg->offset -= local64_read(&event->hw.prev_count);
82145
82146 userpg->time_enabled = enabled +
82147 - atomic64_read(&event->child_total_time_enabled);
82148 + atomic64_read_unchecked(&event->child_total_time_enabled);
82149
82150 userpg->time_running = running +
82151 - atomic64_read(&event->child_total_time_running);
82152 + atomic64_read_unchecked(&event->child_total_time_running);
82153
82154 arch_perf_update_userpage(userpg, now);
82155
82156 @@ -4279,7 +4286,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
82157
82158 /* Data. */
82159 sp = perf_user_stack_pointer(regs);
82160 - rem = __output_copy_user(handle, (void *) sp, dump_size);
82161 + rem = __output_copy_user(handle, (void __user *) sp, dump_size);
82162 dyn_size = dump_size - rem;
82163
82164 perf_output_skip(handle, rem);
82165 @@ -4370,11 +4377,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
82166 values[n++] = perf_event_count(event);
82167 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
82168 values[n++] = enabled +
82169 - atomic64_read(&event->child_total_time_enabled);
82170 + atomic64_read_unchecked(&event->child_total_time_enabled);
82171 }
82172 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
82173 values[n++] = running +
82174 - atomic64_read(&event->child_total_time_running);
82175 + atomic64_read_unchecked(&event->child_total_time_running);
82176 }
82177 if (read_format & PERF_FORMAT_ID)
82178 values[n++] = primary_event_id(event);
82179 @@ -5112,12 +5119,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
82180 * need to add enough zero bytes after the string to handle
82181 * the 64bit alignment we do later.
82182 */
82183 - buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
82184 + buf = kzalloc(PATH_MAX, GFP_KERNEL);
82185 if (!buf) {
82186 name = strncpy(tmp, "//enomem", sizeof(tmp));
82187 goto got_name;
82188 }
82189 - name = d_path(&file->f_path, buf, PATH_MAX);
82190 + name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
82191 if (IS_ERR(name)) {
82192 name = strncpy(tmp, "//toolong", sizeof(tmp));
82193 goto got_name;
82194 @@ -6639,7 +6646,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
82195 event->parent = parent_event;
82196
82197 event->ns = get_pid_ns(task_active_pid_ns(current));
82198 - event->id = atomic64_inc_return(&perf_event_id);
82199 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
82200
82201 event->state = PERF_EVENT_STATE_INACTIVE;
82202
82203 @@ -6938,6 +6945,11 @@ SYSCALL_DEFINE5(perf_event_open,
82204 if (flags & ~PERF_FLAG_ALL)
82205 return -EINVAL;
82206
82207 +#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
82208 + if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
82209 + return -EACCES;
82210 +#endif
82211 +
82212 err = perf_copy_attr(attr_uptr, &attr);
82213 if (err)
82214 return err;
82215 @@ -7271,10 +7283,10 @@ static void sync_child_event(struct perf_event *child_event,
82216 /*
82217 * Add back the child's count to the parent's count:
82218 */
82219 - atomic64_add(child_val, &parent_event->child_count);
82220 - atomic64_add(child_event->total_time_enabled,
82221 + atomic64_add_unchecked(child_val, &parent_event->child_count);
82222 + atomic64_add_unchecked(child_event->total_time_enabled,
82223 &parent_event->child_total_time_enabled);
82224 - atomic64_add(child_event->total_time_running,
82225 + atomic64_add_unchecked(child_event->total_time_running,
82226 &parent_event->child_total_time_running);
82227
82228 /*
82229 diff --git a/kernel/events/internal.h b/kernel/events/internal.h
82230 index ca65997..60df03d 100644
82231 --- a/kernel/events/internal.h
82232 +++ b/kernel/events/internal.h
82233 @@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
82234 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
82235 }
82236
82237 -#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
82238 -static inline unsigned int \
82239 +#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
82240 +static inline unsigned long \
82241 func_name(struct perf_output_handle *handle, \
82242 - const void *buf, unsigned int len) \
82243 + const void user *buf, unsigned long len) \
82244 { \
82245 unsigned long size, written; \
82246 \
82247 @@ -116,17 +116,17 @@ static inline int memcpy_common(void *dst, const void *src, size_t n)
82248 return n;
82249 }
82250
82251 -DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
82252 +DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
82253
82254 #define MEMCPY_SKIP(dst, src, n) (n)
82255
82256 -DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)
82257 +DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP, )
82258
82259 #ifndef arch_perf_out_copy_user
82260 #define arch_perf_out_copy_user __copy_from_user_inatomic
82261 #endif
82262
82263 -DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
82264 +DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
82265
82266 /* Callchain handling */
82267 extern struct perf_callchain_entry *
82268 diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
82269 index ad8e1bd..fed7ba9 100644
82270 --- a/kernel/events/uprobes.c
82271 +++ b/kernel/events/uprobes.c
82272 @@ -1556,7 +1556,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
82273 {
82274 struct page *page;
82275 uprobe_opcode_t opcode;
82276 - int result;
82277 + long result;
82278
82279 pagefault_disable();
82280 result = __copy_from_user_inatomic(&opcode, (void __user*)vaddr,
82281 diff --git a/kernel/exit.c b/kernel/exit.c
82282 index a949819..a5f127d 100644
82283 --- a/kernel/exit.c
82284 +++ b/kernel/exit.c
82285 @@ -172,6 +172,10 @@ void release_task(struct task_struct * p)
82286 struct task_struct *leader;
82287 int zap_leader;
82288 repeat:
82289 +#ifdef CONFIG_NET
82290 + gr_del_task_from_ip_table(p);
82291 +#endif
82292 +
82293 /* don't need to get the RCU readlock here - the process is dead and
82294 * can't be modifying its own credentials. But shut RCU-lockdep up */
82295 rcu_read_lock();
82296 @@ -329,7 +333,7 @@ int allow_signal(int sig)
82297 * know it'll be handled, so that they don't get converted to
82298 * SIGKILL or just silently dropped.
82299 */
82300 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
82301 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
82302 recalc_sigpending();
82303 spin_unlock_irq(&current->sighand->siglock);
82304 return 0;
82305 @@ -698,6 +702,8 @@ void do_exit(long code)
82306 struct task_struct *tsk = current;
82307 int group_dead;
82308
82309 + set_fs(USER_DS);
82310 +
82311 profile_task_exit(tsk);
82312
82313 WARN_ON(blk_needs_flush_plug(tsk));
82314 @@ -714,7 +720,6 @@ void do_exit(long code)
82315 * mm_release()->clear_child_tid() from writing to a user-controlled
82316 * kernel address.
82317 */
82318 - set_fs(USER_DS);
82319
82320 ptrace_event(PTRACE_EVENT_EXIT, code);
82321
82322 @@ -773,6 +778,9 @@ void do_exit(long code)
82323 tsk->exit_code = code;
82324 taskstats_exit(tsk, group_dead);
82325
82326 + gr_acl_handle_psacct(tsk, code);
82327 + gr_acl_handle_exit();
82328 +
82329 exit_mm(tsk);
82330
82331 if (group_dead)
82332 @@ -894,7 +902,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
82333 * Take down every thread in the group. This is called by fatal signals
82334 * as well as by sys_exit_group (below).
82335 */
82336 -void
82337 +__noreturn void
82338 do_group_exit(int exit_code)
82339 {
82340 struct signal_struct *sig = current->signal;
82341 diff --git a/kernel/fork.c b/kernel/fork.c
82342 index 086fe73..72c1122 100644
82343 --- a/kernel/fork.c
82344 +++ b/kernel/fork.c
82345 @@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
82346 *stackend = STACK_END_MAGIC; /* for overflow detection */
82347
82348 #ifdef CONFIG_CC_STACKPROTECTOR
82349 - tsk->stack_canary = get_random_int();
82350 + tsk->stack_canary = pax_get_random_long();
82351 #endif
82352
82353 /*
82354 @@ -345,12 +345,80 @@ free_tsk:
82355 }
82356
82357 #ifdef CONFIG_MMU
82358 -static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
82359 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
82360 +{
82361 + struct vm_area_struct *tmp;
82362 + unsigned long charge;
82363 + struct file *file;
82364 + int retval;
82365 +
82366 + charge = 0;
82367 + if (mpnt->vm_flags & VM_ACCOUNT) {
82368 + unsigned long len = vma_pages(mpnt);
82369 +
82370 + if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
82371 + goto fail_nomem;
82372 + charge = len;
82373 + }
82374 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
82375 + if (!tmp)
82376 + goto fail_nomem;
82377 + *tmp = *mpnt;
82378 + tmp->vm_mm = mm;
82379 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
82380 + retval = vma_dup_policy(mpnt, tmp);
82381 + if (retval)
82382 + goto fail_nomem_policy;
82383 + if (anon_vma_fork(tmp, mpnt))
82384 + goto fail_nomem_anon_vma_fork;
82385 + tmp->vm_flags &= ~VM_LOCKED;
82386 + tmp->vm_next = tmp->vm_prev = NULL;
82387 + tmp->vm_mirror = NULL;
82388 + file = tmp->vm_file;
82389 + if (file) {
82390 + struct inode *inode = file_inode(file);
82391 + struct address_space *mapping = file->f_mapping;
82392 +
82393 + get_file(file);
82394 + if (tmp->vm_flags & VM_DENYWRITE)
82395 + atomic_dec(&inode->i_writecount);
82396 + mutex_lock(&mapping->i_mmap_mutex);
82397 + if (tmp->vm_flags & VM_SHARED)
82398 + mapping->i_mmap_writable++;
82399 + flush_dcache_mmap_lock(mapping);
82400 + /* insert tmp into the share list, just after mpnt */
82401 + if (unlikely(tmp->vm_flags & VM_NONLINEAR))
82402 + vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
82403 + else
82404 + vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
82405 + flush_dcache_mmap_unlock(mapping);
82406 + mutex_unlock(&mapping->i_mmap_mutex);
82407 + }
82408 +
82409 + /*
82410 + * Clear hugetlb-related page reserves for children. This only
82411 + * affects MAP_PRIVATE mappings. Faults generated by the child
82412 + * are not guaranteed to succeed, even if read-only
82413 + */
82414 + if (is_vm_hugetlb_page(tmp))
82415 + reset_vma_resv_huge_pages(tmp);
82416 +
82417 + return tmp;
82418 +
82419 +fail_nomem_anon_vma_fork:
82420 + mpol_put(vma_policy(tmp));
82421 +fail_nomem_policy:
82422 + kmem_cache_free(vm_area_cachep, tmp);
82423 +fail_nomem:
82424 + vm_unacct_memory(charge);
82425 + return NULL;
82426 +}
82427 +
82428 +static __latent_entropy int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
82429 {
82430 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
82431 struct rb_node **rb_link, *rb_parent;
82432 int retval;
82433 - unsigned long charge;
82434
82435 uprobe_start_dup_mmap();
82436 down_write(&oldmm->mmap_sem);
82437 @@ -379,55 +447,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
82438
82439 prev = NULL;
82440 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
82441 - struct file *file;
82442 -
82443 if (mpnt->vm_flags & VM_DONTCOPY) {
82444 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
82445 -vma_pages(mpnt));
82446 continue;
82447 }
82448 - charge = 0;
82449 - if (mpnt->vm_flags & VM_ACCOUNT) {
82450 - unsigned long len = vma_pages(mpnt);
82451 -
82452 - if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
82453 - goto fail_nomem;
82454 - charge = len;
82455 - }
82456 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
82457 - if (!tmp)
82458 - goto fail_nomem;
82459 - *tmp = *mpnt;
82460 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
82461 - retval = vma_dup_policy(mpnt, tmp);
82462 - if (retval)
82463 - goto fail_nomem_policy;
82464 - tmp->vm_mm = mm;
82465 - if (anon_vma_fork(tmp, mpnt))
82466 - goto fail_nomem_anon_vma_fork;
82467 - tmp->vm_flags &= ~VM_LOCKED;
82468 - tmp->vm_next = tmp->vm_prev = NULL;
82469 - file = tmp->vm_file;
82470 - if (file) {
82471 - struct inode *inode = file_inode(file);
82472 - struct address_space *mapping = file->f_mapping;
82473 -
82474 - get_file(file);
82475 - if (tmp->vm_flags & VM_DENYWRITE)
82476 - atomic_dec(&inode->i_writecount);
82477 - mutex_lock(&mapping->i_mmap_mutex);
82478 - if (tmp->vm_flags & VM_SHARED)
82479 - mapping->i_mmap_writable++;
82480 - flush_dcache_mmap_lock(mapping);
82481 - /* insert tmp into the share list, just after mpnt */
82482 - if (unlikely(tmp->vm_flags & VM_NONLINEAR))
82483 - vma_nonlinear_insert(tmp,
82484 - &mapping->i_mmap_nonlinear);
82485 - else
82486 - vma_interval_tree_insert_after(tmp, mpnt,
82487 - &mapping->i_mmap);
82488 - flush_dcache_mmap_unlock(mapping);
82489 - mutex_unlock(&mapping->i_mmap_mutex);
82490 + tmp = dup_vma(mm, oldmm, mpnt);
82491 + if (!tmp) {
82492 + retval = -ENOMEM;
82493 + goto out;
82494 }
82495
82496 /*
82497 @@ -459,6 +487,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
82498 if (retval)
82499 goto out;
82500 }
82501 +
82502 +#ifdef CONFIG_PAX_SEGMEXEC
82503 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
82504 + struct vm_area_struct *mpnt_m;
82505 +
82506 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
82507 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
82508 +
82509 + if (!mpnt->vm_mirror)
82510 + continue;
82511 +
82512 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
82513 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
82514 + mpnt->vm_mirror = mpnt_m;
82515 + } else {
82516 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
82517 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
82518 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
82519 + mpnt->vm_mirror->vm_mirror = mpnt;
82520 + }
82521 + }
82522 + BUG_ON(mpnt_m);
82523 + }
82524 +#endif
82525 +
82526 /* a new mm has just been created */
82527 arch_dup_mmap(oldmm, mm);
82528 retval = 0;
82529 @@ -468,14 +521,6 @@ out:
82530 up_write(&oldmm->mmap_sem);
82531 uprobe_end_dup_mmap();
82532 return retval;
82533 -fail_nomem_anon_vma_fork:
82534 - mpol_put(vma_policy(tmp));
82535 -fail_nomem_policy:
82536 - kmem_cache_free(vm_area_cachep, tmp);
82537 -fail_nomem:
82538 - retval = -ENOMEM;
82539 - vm_unacct_memory(charge);
82540 - goto out;
82541 }
82542
82543 static inline int mm_alloc_pgd(struct mm_struct *mm)
82544 @@ -688,8 +733,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
82545 return ERR_PTR(err);
82546
82547 mm = get_task_mm(task);
82548 - if (mm && mm != current->mm &&
82549 - !ptrace_may_access(task, mode)) {
82550 + if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
82551 + (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
82552 mmput(mm);
82553 mm = ERR_PTR(-EACCES);
82554 }
82555 @@ -911,13 +956,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
82556 spin_unlock(&fs->lock);
82557 return -EAGAIN;
82558 }
82559 - fs->users++;
82560 + atomic_inc(&fs->users);
82561 spin_unlock(&fs->lock);
82562 return 0;
82563 }
82564 tsk->fs = copy_fs_struct(fs);
82565 if (!tsk->fs)
82566 return -ENOMEM;
82567 + /* Carry through gr_chroot_dentry and is_chrooted instead
82568 + of recomputing it here. Already copied when the task struct
82569 + is duplicated. This allows pivot_root to not be treated as
82570 + a chroot
82571 + */
82572 + //gr_set_chroot_entries(tsk, &tsk->fs->root);
82573 +
82574 return 0;
82575 }
82576
82577 @@ -1128,7 +1180,7 @@ init_task_pid(struct task_struct *task, enum pid_type type, struct pid *pid)
82578 * parts of the process environment (as per the clone
82579 * flags). The actual kick-off is left to the caller.
82580 */
82581 -static struct task_struct *copy_process(unsigned long clone_flags,
82582 +static __latent_entropy struct task_struct *copy_process(unsigned long clone_flags,
82583 unsigned long stack_start,
82584 unsigned long stack_size,
82585 int __user *child_tidptr,
82586 @@ -1200,6 +1252,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
82587 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
82588 #endif
82589 retval = -EAGAIN;
82590 +
82591 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
82592 +
82593 if (atomic_read(&p->real_cred->user->processes) >=
82594 task_rlimit(p, RLIMIT_NPROC)) {
82595 if (p->real_cred->user != INIT_USER &&
82596 @@ -1449,6 +1504,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
82597 goto bad_fork_free_pid;
82598 }
82599
82600 + /* synchronizes with gr_set_acls()
82601 + we need to call this past the point of no return for fork()
82602 + */
82603 + gr_copy_label(p);
82604 +
82605 if (likely(p->pid)) {
82606 ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);
82607
82608 @@ -1534,6 +1594,8 @@ bad_fork_cleanup_count:
82609 bad_fork_free:
82610 free_task(p);
82611 fork_out:
82612 + gr_log_forkfail(retval);
82613 +
82614 return ERR_PTR(retval);
82615 }
82616
82617 @@ -1595,6 +1657,7 @@ long do_fork(unsigned long clone_flags,
82618
82619 p = copy_process(clone_flags, stack_start, stack_size,
82620 child_tidptr, NULL, trace);
82621 + add_latent_entropy();
82622 /*
82623 * Do this prior waking up the new thread - the thread pointer
82624 * might get invalid after that point, if the thread exits quickly.
82625 @@ -1609,6 +1672,8 @@ long do_fork(unsigned long clone_flags,
82626 if (clone_flags & CLONE_PARENT_SETTID)
82627 put_user(nr, parent_tidptr);
82628
82629 + gr_handle_brute_check();
82630 +
82631 if (clone_flags & CLONE_VFORK) {
82632 p->vfork_done = &vfork;
82633 init_completion(&vfork);
82634 @@ -1725,7 +1790,7 @@ void __init proc_caches_init(void)
82635 mm_cachep = kmem_cache_create("mm_struct",
82636 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
82637 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
82638 - vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
82639 + vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
82640 mmap_init();
82641 nsproxy_cache_init();
82642 }
82643 @@ -1765,7 +1830,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
82644 return 0;
82645
82646 /* don't need lock here; in the worst case we'll do useless copy */
82647 - if (fs->users == 1)
82648 + if (atomic_read(&fs->users) == 1)
82649 return 0;
82650
82651 *new_fsp = copy_fs_struct(fs);
82652 @@ -1872,7 +1937,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
82653 fs = current->fs;
82654 spin_lock(&fs->lock);
82655 current->fs = new_fs;
82656 - if (--fs->users)
82657 + gr_set_chroot_entries(current, &current->fs->root);
82658 + if (atomic_dec_return(&fs->users))
82659 new_fs = NULL;
82660 else
82661 new_fs = fs;
82662 diff --git a/kernel/futex.c b/kernel/futex.c
82663 index 221a58f..1b8cfce 100644
82664 --- a/kernel/futex.c
82665 +++ b/kernel/futex.c
82666 @@ -54,6 +54,7 @@
82667 #include <linux/mount.h>
82668 #include <linux/pagemap.h>
82669 #include <linux/syscalls.h>
82670 +#include <linux/ptrace.h>
82671 #include <linux/signal.h>
82672 #include <linux/export.h>
82673 #include <linux/magic.h>
82674 @@ -243,6 +244,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
82675 struct page *page, *page_head;
82676 int err, ro = 0;
82677
82678 +#ifdef CONFIG_PAX_SEGMEXEC
82679 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
82680 + return -EFAULT;
82681 +#endif
82682 +
82683 /*
82684 * The futex address must be "naturally" aligned.
82685 */
82686 @@ -441,7 +447,7 @@ static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr,
82687
82688 static int get_futex_value_locked(u32 *dest, u32 __user *from)
82689 {
82690 - int ret;
82691 + unsigned long ret;
82692
82693 pagefault_disable();
82694 ret = __copy_from_user_inatomic(dest, from, sizeof(u32));
82695 @@ -2734,6 +2740,7 @@ static int __init futex_init(void)
82696 {
82697 u32 curval;
82698 int i;
82699 + mm_segment_t oldfs;
82700
82701 /*
82702 * This will fail and we want it. Some arch implementations do
82703 @@ -2745,8 +2752,11 @@ static int __init futex_init(void)
82704 * implementation, the non-functional ones will return
82705 * -ENOSYS.
82706 */
82707 + oldfs = get_fs();
82708 + set_fs(USER_DS);
82709 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
82710 futex_cmpxchg_enabled = 1;
82711 + set_fs(oldfs);
82712
82713 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
82714 plist_head_init(&futex_queues[i].chain);
82715 diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
82716 index f9f44fd..29885e4 100644
82717 --- a/kernel/futex_compat.c
82718 +++ b/kernel/futex_compat.c
82719 @@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
82720 return 0;
82721 }
82722
82723 -static void __user *futex_uaddr(struct robust_list __user *entry,
82724 +static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
82725 compat_long_t futex_offset)
82726 {
82727 compat_uptr_t base = ptr_to_compat(entry);
82728 diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
82729 index 9b22d03..6295b62 100644
82730 --- a/kernel/gcov/base.c
82731 +++ b/kernel/gcov/base.c
82732 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
82733 }
82734
82735 #ifdef CONFIG_MODULES
82736 -static inline int within(void *addr, void *start, unsigned long size)
82737 -{
82738 - return ((addr >= start) && (addr < start + size));
82739 -}
82740 -
82741 /* Update list and generate events when modules are unloaded. */
82742 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
82743 void *data)
82744 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
82745 prev = NULL;
82746 /* Remove entries located in module from linked list. */
82747 for (info = gcov_info_head; info; info = info->next) {
82748 - if (within(info, mod->module_core, mod->core_size)) {
82749 + if (within_module_core_rw((unsigned long)info, mod)) {
82750 if (prev)
82751 prev->next = info->next;
82752 else
82753 diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
82754 index 383319b..56ebb13 100644
82755 --- a/kernel/hrtimer.c
82756 +++ b/kernel/hrtimer.c
82757 @@ -1438,7 +1438,7 @@ void hrtimer_peek_ahead_timers(void)
82758 local_irq_restore(flags);
82759 }
82760
82761 -static void run_hrtimer_softirq(struct softirq_action *h)
82762 +static __latent_entropy void run_hrtimer_softirq(void)
82763 {
82764 hrtimer_peek_ahead_timers();
82765 }
82766 diff --git a/kernel/irq_work.c b/kernel/irq_work.c
82767 index 55fcce6..0e4cf34 100644
82768 --- a/kernel/irq_work.c
82769 +++ b/kernel/irq_work.c
82770 @@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
82771 return NOTIFY_OK;
82772 }
82773
82774 -static struct notifier_block cpu_notify;
82775 +static struct notifier_block cpu_notify = {
82776 + .notifier_call = irq_work_cpu_notify,
82777 + .priority = 0,
82778 +};
82779
82780 static __init int irq_work_init_cpu_notifier(void)
82781 {
82782 - cpu_notify.notifier_call = irq_work_cpu_notify;
82783 - cpu_notify.priority = 0;
82784 register_cpu_notifier(&cpu_notify);
82785 return 0;
82786 }
82787 diff --git a/kernel/jump_label.c b/kernel/jump_label.c
82788 index 297a924..7290070 100644
82789 --- a/kernel/jump_label.c
82790 +++ b/kernel/jump_label.c
82791 @@ -14,6 +14,7 @@
82792 #include <linux/err.h>
82793 #include <linux/static_key.h>
82794 #include <linux/jump_label_ratelimit.h>
82795 +#include <linux/mm.h>
82796
82797 #ifdef HAVE_JUMP_LABEL
82798
82799 @@ -51,7 +52,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
82800
82801 size = (((unsigned long)stop - (unsigned long)start)
82802 / sizeof(struct jump_entry));
82803 + pax_open_kernel();
82804 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
82805 + pax_close_kernel();
82806 }
82807
82808 static void jump_label_update(struct static_key *key, int enable);
82809 @@ -358,10 +361,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
82810 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
82811 struct jump_entry *iter;
82812
82813 + pax_open_kernel();
82814 for (iter = iter_start; iter < iter_stop; iter++) {
82815 if (within_module_init(iter->code, mod))
82816 iter->code = 0;
82817 }
82818 + pax_close_kernel();
82819 }
82820
82821 static int
82822 diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
82823 index 3127ad5..159d880 100644
82824 --- a/kernel/kallsyms.c
82825 +++ b/kernel/kallsyms.c
82826 @@ -11,6 +11,9 @@
82827 * Changed the compression method from stem compression to "table lookup"
82828 * compression (see scripts/kallsyms.c for a more complete description)
82829 */
82830 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82831 +#define __INCLUDED_BY_HIDESYM 1
82832 +#endif
82833 #include <linux/kallsyms.h>
82834 #include <linux/module.h>
82835 #include <linux/init.h>
82836 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
82837
82838 static inline int is_kernel_inittext(unsigned long addr)
82839 {
82840 + if (system_state != SYSTEM_BOOTING)
82841 + return 0;
82842 +
82843 if (addr >= (unsigned long)_sinittext
82844 && addr <= (unsigned long)_einittext)
82845 return 1;
82846 return 0;
82847 }
82848
82849 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
82850 +#ifdef CONFIG_MODULES
82851 +static inline int is_module_text(unsigned long addr)
82852 +{
82853 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
82854 + return 1;
82855 +
82856 + addr = ktla_ktva(addr);
82857 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
82858 +}
82859 +#else
82860 +static inline int is_module_text(unsigned long addr)
82861 +{
82862 + return 0;
82863 +}
82864 +#endif
82865 +#endif
82866 +
82867 static inline int is_kernel_text(unsigned long addr)
82868 {
82869 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
82870 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
82871
82872 static inline int is_kernel(unsigned long addr)
82873 {
82874 +
82875 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
82876 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
82877 + return 1;
82878 +
82879 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
82880 +#else
82881 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
82882 +#endif
82883 +
82884 return 1;
82885 return in_gate_area_no_mm(addr);
82886 }
82887
82888 static int is_ksym_addr(unsigned long addr)
82889 {
82890 +
82891 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
82892 + if (is_module_text(addr))
82893 + return 0;
82894 +#endif
82895 +
82896 if (all_var)
82897 return is_kernel(addr);
82898
82899 @@ -480,7 +519,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
82900
82901 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
82902 {
82903 - iter->name[0] = '\0';
82904 iter->nameoff = get_symbol_offset(new_pos);
82905 iter->pos = new_pos;
82906 }
82907 @@ -528,6 +566,11 @@ static int s_show(struct seq_file *m, void *p)
82908 {
82909 struct kallsym_iter *iter = m->private;
82910
82911 +#ifdef CONFIG_GRKERNSEC_HIDESYM
82912 + if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
82913 + return 0;
82914 +#endif
82915 +
82916 /* Some debugging symbols have no name. Ignore them. */
82917 if (!iter->name[0])
82918 return 0;
82919 @@ -541,6 +584,7 @@ static int s_show(struct seq_file *m, void *p)
82920 */
82921 type = iter->exported ? toupper(iter->type) :
82922 tolower(iter->type);
82923 +
82924 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
82925 type, iter->name, iter->module_name);
82926 } else
82927 @@ -566,7 +610,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
82928 struct kallsym_iter *iter;
82929 int ret;
82930
82931 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
82932 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
82933 if (!iter)
82934 return -ENOMEM;
82935 reset_iter(iter, 0);
82936 diff --git a/kernel/kcmp.c b/kernel/kcmp.c
82937 index e30ac0f..3528cac 100644
82938 --- a/kernel/kcmp.c
82939 +++ b/kernel/kcmp.c
82940 @@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
82941 struct task_struct *task1, *task2;
82942 int ret;
82943
82944 +#ifdef CONFIG_GRKERNSEC
82945 + return -ENOSYS;
82946 +#endif
82947 +
82948 rcu_read_lock();
82949
82950 /*
82951 diff --git a/kernel/kexec.c b/kernel/kexec.c
82952 index ecd783d..9aa270c 100644
82953 --- a/kernel/kexec.c
82954 +++ b/kernel/kexec.c
82955 @@ -1044,7 +1044,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
82956 unsigned long flags)
82957 {
82958 struct compat_kexec_segment in;
82959 - struct kexec_segment out, __user *ksegments;
82960 + struct kexec_segment out;
82961 + struct kexec_segment __user *ksegments;
82962 unsigned long i, result;
82963
82964 /* Don't allow clients that don't understand the native
82965 diff --git a/kernel/kmod.c b/kernel/kmod.c
82966 index b086006..6d2e579 100644
82967 --- a/kernel/kmod.c
82968 +++ b/kernel/kmod.c
82969 @@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
82970 kfree(info->argv);
82971 }
82972
82973 -static int call_modprobe(char *module_name, int wait)
82974 +static int call_modprobe(char *module_name, char *module_param, int wait)
82975 {
82976 struct subprocess_info *info;
82977 static char *envp[] = {
82978 @@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
82979 NULL
82980 };
82981
82982 - char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
82983 + char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
82984 if (!argv)
82985 goto out;
82986
82987 @@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
82988 argv[1] = "-q";
82989 argv[2] = "--";
82990 argv[3] = module_name; /* check free_modprobe_argv() */
82991 - argv[4] = NULL;
82992 + argv[4] = module_param;
82993 + argv[5] = NULL;
82994
82995 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
82996 NULL, free_modprobe_argv, NULL);
82997 @@ -129,9 +130,8 @@ out:
82998 * If module auto-loading support is disabled then this function
82999 * becomes a no-operation.
83000 */
83001 -int __request_module(bool wait, const char *fmt, ...)
83002 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
83003 {
83004 - va_list args;
83005 char module_name[MODULE_NAME_LEN];
83006 unsigned int max_modprobes;
83007 int ret;
83008 @@ -150,9 +150,7 @@ int __request_module(bool wait, const char *fmt, ...)
83009 if (!modprobe_path[0])
83010 return 0;
83011
83012 - va_start(args, fmt);
83013 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
83014 - va_end(args);
83015 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
83016 if (ret >= MODULE_NAME_LEN)
83017 return -ENAMETOOLONG;
83018
83019 @@ -160,6 +158,20 @@ int __request_module(bool wait, const char *fmt, ...)
83020 if (ret)
83021 return ret;
83022
83023 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
83024 + if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
83025 + /* hack to workaround consolekit/udisks stupidity */
83026 + read_lock(&tasklist_lock);
83027 + if (!strcmp(current->comm, "mount") &&
83028 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
83029 + read_unlock(&tasklist_lock);
83030 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
83031 + return -EPERM;
83032 + }
83033 + read_unlock(&tasklist_lock);
83034 + }
83035 +#endif
83036 +
83037 /* If modprobe needs a service that is in a module, we get a recursive
83038 * loop. Limit the number of running kmod threads to max_threads/2 or
83039 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
83040 @@ -188,11 +200,52 @@ int __request_module(bool wait, const char *fmt, ...)
83041
83042 trace_module_request(module_name, wait, _RET_IP_);
83043
83044 - ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
83045 + ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
83046
83047 atomic_dec(&kmod_concurrent);
83048 return ret;
83049 }
83050 +
83051 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
83052 +{
83053 + va_list args;
83054 + int ret;
83055 +
83056 + va_start(args, fmt);
83057 + ret = ____request_module(wait, module_param, fmt, args);
83058 + va_end(args);
83059 +
83060 + return ret;
83061 +}
83062 +
83063 +int __request_module(bool wait, const char *fmt, ...)
83064 +{
83065 + va_list args;
83066 + int ret;
83067 +
83068 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
83069 + if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
83070 + char module_param[MODULE_NAME_LEN];
83071 +
83072 + memset(module_param, 0, sizeof(module_param));
83073 +
83074 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
83075 +
83076 + va_start(args, fmt);
83077 + ret = ____request_module(wait, module_param, fmt, args);
83078 + va_end(args);
83079 +
83080 + return ret;
83081 + }
83082 +#endif
83083 +
83084 + va_start(args, fmt);
83085 + ret = ____request_module(wait, NULL, fmt, args);
83086 + va_end(args);
83087 +
83088 + return ret;
83089 +}
83090 +
83091 EXPORT_SYMBOL(__request_module);
83092 #endif /* CONFIG_MODULES */
83093
83094 @@ -218,6 +271,19 @@ static int ____call_usermodehelper(void *data)
83095 */
83096 set_user_nice(current, 0);
83097
83098 +#ifdef CONFIG_GRKERNSEC
83099 + /* this is race-free as far as userland is concerned as we copied
83100 + out the path to be used prior to this point and are now operating
83101 + on that copy
83102 + */
83103 + if ((strncmp(sub_info->path, "/sbin/", 6) && strncmp(sub_info->path, "/usr/lib/", 9) &&
83104 + strncmp(sub_info->path, "/lib/", 5) && strncmp(sub_info->path, "/lib64/", 7)) || strstr(sub_info->path, "..")) {
83105 + printk(KERN_ALERT "grsec: denied exec of usermode helper binary %.950s located outside of /sbin and system library paths\n", sub_info->path);
83106 + retval = -EPERM;
83107 + goto fail;
83108 + }
83109 +#endif
83110 +
83111 retval = -ENOMEM;
83112 new = prepare_kernel_cred(current);
83113 if (!new)
83114 @@ -260,6 +326,10 @@ static int call_helper(void *data)
83115
83116 static void call_usermodehelper_freeinfo(struct subprocess_info *info)
83117 {
83118 +#ifdef CONFIG_GRKERNSEC
83119 + kfree(info->path);
83120 + info->path = info->origpath;
83121 +#endif
83122 if (info->cleanup)
83123 (*info->cleanup)(info);
83124 kfree(info);
83125 @@ -303,7 +373,7 @@ static int wait_for_helper(void *data)
83126 *
83127 * Thus the __user pointer cast is valid here.
83128 */
83129 - sys_wait4(pid, (int __user *)&ret, 0, NULL);
83130 + sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
83131
83132 /*
83133 * If ret is 0, either ____call_usermodehelper failed and the
83134 @@ -542,7 +612,12 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
83135 goto out;
83136
83137 INIT_WORK(&sub_info->work, __call_usermodehelper);
83138 +#ifdef CONFIG_GRKERNSEC
83139 + sub_info->origpath = path;
83140 + sub_info->path = kstrdup(path, gfp_mask);
83141 +#else
83142 sub_info->path = path;
83143 +#endif
83144 sub_info->argv = argv;
83145 sub_info->envp = envp;
83146
83147 @@ -650,7 +725,7 @@ EXPORT_SYMBOL(call_usermodehelper);
83148 static int proc_cap_handler(struct ctl_table *table, int write,
83149 void __user *buffer, size_t *lenp, loff_t *ppos)
83150 {
83151 - struct ctl_table t;
83152 + ctl_table_no_const t;
83153 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
83154 kernel_cap_t new_cap;
83155 int err, i;
83156 diff --git a/kernel/kprobes.c b/kernel/kprobes.c
83157 index a0d367a..11c18b6 100644
83158 --- a/kernel/kprobes.c
83159 +++ b/kernel/kprobes.c
83160 @@ -31,6 +31,9 @@
83161 * <jkenisto@us.ibm.com> and Prasanna S Panchamukhi
83162 * <prasanna@in.ibm.com> added function-return probes.
83163 */
83164 +#ifdef CONFIG_GRKERNSEC_HIDESYM
83165 +#define __INCLUDED_BY_HIDESYM 1
83166 +#endif
83167 #include <linux/kprobes.h>
83168 #include <linux/hash.h>
83169 #include <linux/init.h>
83170 @@ -135,12 +138,12 @@ enum kprobe_slot_state {
83171
83172 static void *alloc_insn_page(void)
83173 {
83174 - return module_alloc(PAGE_SIZE);
83175 + return module_alloc_exec(PAGE_SIZE);
83176 }
83177
83178 static void free_insn_page(void *page)
83179 {
83180 - module_free(NULL, page);
83181 + module_free_exec(NULL, page);
83182 }
83183
83184 struct kprobe_insn_cache kprobe_insn_slots = {
83185 @@ -2066,7 +2069,7 @@ static int __init init_kprobes(void)
83186 {
83187 int i, err = 0;
83188 unsigned long offset = 0, size = 0;
83189 - char *modname, namebuf[128];
83190 + char *modname, namebuf[KSYM_NAME_LEN];
83191 const char *symbol_name;
83192 void *addr;
83193 struct kprobe_blackpoint *kb;
83194 @@ -2151,11 +2154,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
83195 kprobe_type = "k";
83196
83197 if (sym)
83198 - seq_printf(pi, "%p %s %s+0x%x %s ",
83199 + seq_printf(pi, "%pK %s %s+0x%x %s ",
83200 p->addr, kprobe_type, sym, offset,
83201 (modname ? modname : " "));
83202 else
83203 - seq_printf(pi, "%p %s %p ",
83204 + seq_printf(pi, "%pK %s %pK ",
83205 p->addr, kprobe_type, p->addr);
83206
83207 if (!pp)
83208 @@ -2192,7 +2195,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
83209 const char *sym = NULL;
83210 unsigned int i = *(loff_t *) v;
83211 unsigned long offset = 0;
83212 - char *modname, namebuf[128];
83213 + char *modname, namebuf[KSYM_NAME_LEN];
83214
83215 head = &kprobe_table[i];
83216 preempt_disable();
83217 diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
83218 index 9659d38..bffd520 100644
83219 --- a/kernel/ksysfs.c
83220 +++ b/kernel/ksysfs.c
83221 @@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
83222 {
83223 if (count+1 > UEVENT_HELPER_PATH_LEN)
83224 return -ENOENT;
83225 + if (!capable(CAP_SYS_ADMIN))
83226 + return -EPERM;
83227 memcpy(uevent_helper, buf, count);
83228 uevent_helper[count] = '\0';
83229 if (count && uevent_helper[count-1] == '\n')
83230 @@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
83231 return count;
83232 }
83233
83234 -static struct bin_attribute notes_attr = {
83235 +static bin_attribute_no_const notes_attr __read_only = {
83236 .attr = {
83237 .name = "notes",
83238 .mode = S_IRUGO,
83239 diff --git a/kernel/lockdep.c b/kernel/lockdep.c
83240 index e16c45b..6f49c48 100644
83241 --- a/kernel/lockdep.c
83242 +++ b/kernel/lockdep.c
83243 @@ -596,6 +596,10 @@ static int static_obj(void *obj)
83244 end = (unsigned long) &_end,
83245 addr = (unsigned long) obj;
83246
83247 +#ifdef CONFIG_PAX_KERNEXEC
83248 + start = ktla_ktva(start);
83249 +#endif
83250 +
83251 /*
83252 * static variable?
83253 */
83254 @@ -736,6 +740,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
83255 if (!static_obj(lock->key)) {
83256 debug_locks_off();
83257 printk("INFO: trying to register non-static key.\n");
83258 + printk("lock:%pS key:%pS.\n", lock, lock->key);
83259 printk("the code is fine but needs lockdep annotation.\n");
83260 printk("turning off the locking correctness validator.\n");
83261 dump_stack();
83262 @@ -3080,7 +3085,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
83263 if (!class)
83264 return 0;
83265 }
83266 - atomic_inc((atomic_t *)&class->ops);
83267 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
83268 if (very_verbose(class)) {
83269 printk("\nacquire class [%p] %s", class->key, class->name);
83270 if (class->name_version > 1)
83271 diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
83272 index b2c71c5..7b88d63 100644
83273 --- a/kernel/lockdep_proc.c
83274 +++ b/kernel/lockdep_proc.c
83275 @@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
83276 return 0;
83277 }
83278
83279 - seq_printf(m, "%p", class->key);
83280 + seq_printf(m, "%pK", class->key);
83281 #ifdef CONFIG_DEBUG_LOCKDEP
83282 seq_printf(m, " OPS:%8ld", class->ops);
83283 #endif
83284 @@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
83285
83286 list_for_each_entry(entry, &class->locks_after, entry) {
83287 if (entry->distance == 1) {
83288 - seq_printf(m, " -> [%p] ", entry->class->key);
83289 + seq_printf(m, " -> [%pK] ", entry->class->key);
83290 print_name(m, entry->class);
83291 seq_puts(m, "\n");
83292 }
83293 @@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
83294 if (!class->key)
83295 continue;
83296
83297 - seq_printf(m, "[%p] ", class->key);
83298 + seq_printf(m, "[%pK] ", class->key);
83299 print_name(m, class);
83300 seq_puts(m, "\n");
83301 }
83302 @@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
83303 if (!i)
83304 seq_line(m, '-', 40-namelen, namelen);
83305
83306 - snprintf(ip, sizeof(ip), "[<%p>]",
83307 + snprintf(ip, sizeof(ip), "[<%pK>]",
83308 (void *)class->contention_point[i]);
83309 seq_printf(m, "%40s %14lu %29s %pS\n",
83310 name, stats->contention_point[i],
83311 @@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
83312 if (!i)
83313 seq_line(m, '-', 40-namelen, namelen);
83314
83315 - snprintf(ip, sizeof(ip), "[<%p>]",
83316 + snprintf(ip, sizeof(ip), "[<%pK>]",
83317 (void *)class->contending_point[i]);
83318 seq_printf(m, "%40s %14lu %29s %pS\n",
83319 name, stats->contending_point[i],
83320 diff --git a/kernel/module.c b/kernel/module.c
83321 index dc58274..3ddfa55 100644
83322 --- a/kernel/module.c
83323 +++ b/kernel/module.c
83324 @@ -61,6 +61,7 @@
83325 #include <linux/pfn.h>
83326 #include <linux/bsearch.h>
83327 #include <linux/fips.h>
83328 +#include <linux/grsecurity.h>
83329 #include <uapi/linux/module.h>
83330 #include "module-internal.h"
83331
83332 @@ -157,7 +158,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
83333
83334 /* Bounds of module allocation, for speeding __module_address.
83335 * Protected by module_mutex. */
83336 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
83337 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
83338 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
83339
83340 int register_module_notifier(struct notifier_block * nb)
83341 {
83342 @@ -324,7 +326,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
83343 return true;
83344
83345 list_for_each_entry_rcu(mod, &modules, list) {
83346 - struct symsearch arr[] = {
83347 + struct symsearch modarr[] = {
83348 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
83349 NOT_GPL_ONLY, false },
83350 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
83351 @@ -349,7 +351,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
83352 if (mod->state == MODULE_STATE_UNFORMED)
83353 continue;
83354
83355 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
83356 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
83357 return true;
83358 }
83359 return false;
83360 @@ -491,7 +493,7 @@ static int percpu_modalloc(struct module *mod, struct load_info *info)
83361 if (!pcpusec->sh_size)
83362 return 0;
83363
83364 - if (align > PAGE_SIZE) {
83365 + if (align-1 >= PAGE_SIZE) {
83366 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
83367 mod->name, align, PAGE_SIZE);
83368 align = PAGE_SIZE;
83369 @@ -1097,7 +1099,7 @@ struct module_attribute module_uevent =
83370 static ssize_t show_coresize(struct module_attribute *mattr,
83371 struct module_kobject *mk, char *buffer)
83372 {
83373 - return sprintf(buffer, "%u\n", mk->mod->core_size);
83374 + return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
83375 }
83376
83377 static struct module_attribute modinfo_coresize =
83378 @@ -1106,7 +1108,7 @@ static struct module_attribute modinfo_coresize =
83379 static ssize_t show_initsize(struct module_attribute *mattr,
83380 struct module_kobject *mk, char *buffer)
83381 {
83382 - return sprintf(buffer, "%u\n", mk->mod->init_size);
83383 + return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
83384 }
83385
83386 static struct module_attribute modinfo_initsize =
83387 @@ -1321,7 +1323,7 @@ resolve_symbol_wait(struct module *mod,
83388 */
83389 #ifdef CONFIG_SYSFS
83390
83391 -#ifdef CONFIG_KALLSYMS
83392 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
83393 static inline bool sect_empty(const Elf_Shdr *sect)
83394 {
83395 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
83396 @@ -1461,7 +1463,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
83397 {
83398 unsigned int notes, loaded, i;
83399 struct module_notes_attrs *notes_attrs;
83400 - struct bin_attribute *nattr;
83401 + bin_attribute_no_const *nattr;
83402
83403 /* failed to create section attributes, so can't create notes */
83404 if (!mod->sect_attrs)
83405 @@ -1573,7 +1575,7 @@ static void del_usage_links(struct module *mod)
83406 static int module_add_modinfo_attrs(struct module *mod)
83407 {
83408 struct module_attribute *attr;
83409 - struct module_attribute *temp_attr;
83410 + module_attribute_no_const *temp_attr;
83411 int error = 0;
83412 int i;
83413
83414 @@ -1795,21 +1797,21 @@ static void set_section_ro_nx(void *base,
83415
83416 static void unset_module_core_ro_nx(struct module *mod)
83417 {
83418 - set_page_attributes(mod->module_core + mod->core_text_size,
83419 - mod->module_core + mod->core_size,
83420 + set_page_attributes(mod->module_core_rw,
83421 + mod->module_core_rw + mod->core_size_rw,
83422 set_memory_x);
83423 - set_page_attributes(mod->module_core,
83424 - mod->module_core + mod->core_ro_size,
83425 + set_page_attributes(mod->module_core_rx,
83426 + mod->module_core_rx + mod->core_size_rx,
83427 set_memory_rw);
83428 }
83429
83430 static void unset_module_init_ro_nx(struct module *mod)
83431 {
83432 - set_page_attributes(mod->module_init + mod->init_text_size,
83433 - mod->module_init + mod->init_size,
83434 + set_page_attributes(mod->module_init_rw,
83435 + mod->module_init_rw + mod->init_size_rw,
83436 set_memory_x);
83437 - set_page_attributes(mod->module_init,
83438 - mod->module_init + mod->init_ro_size,
83439 + set_page_attributes(mod->module_init_rx,
83440 + mod->module_init_rx + mod->init_size_rx,
83441 set_memory_rw);
83442 }
83443
83444 @@ -1822,14 +1824,14 @@ void set_all_modules_text_rw(void)
83445 list_for_each_entry_rcu(mod, &modules, list) {
83446 if (mod->state == MODULE_STATE_UNFORMED)
83447 continue;
83448 - if ((mod->module_core) && (mod->core_text_size)) {
83449 - set_page_attributes(mod->module_core,
83450 - mod->module_core + mod->core_text_size,
83451 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
83452 + set_page_attributes(mod->module_core_rx,
83453 + mod->module_core_rx + mod->core_size_rx,
83454 set_memory_rw);
83455 }
83456 - if ((mod->module_init) && (mod->init_text_size)) {
83457 - set_page_attributes(mod->module_init,
83458 - mod->module_init + mod->init_text_size,
83459 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
83460 + set_page_attributes(mod->module_init_rx,
83461 + mod->module_init_rx + mod->init_size_rx,
83462 set_memory_rw);
83463 }
83464 }
83465 @@ -1845,14 +1847,14 @@ void set_all_modules_text_ro(void)
83466 list_for_each_entry_rcu(mod, &modules, list) {
83467 if (mod->state == MODULE_STATE_UNFORMED)
83468 continue;
83469 - if ((mod->module_core) && (mod->core_text_size)) {
83470 - set_page_attributes(mod->module_core,
83471 - mod->module_core + mod->core_text_size,
83472 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
83473 + set_page_attributes(mod->module_core_rx,
83474 + mod->module_core_rx + mod->core_size_rx,
83475 set_memory_ro);
83476 }
83477 - if ((mod->module_init) && (mod->init_text_size)) {
83478 - set_page_attributes(mod->module_init,
83479 - mod->module_init + mod->init_text_size,
83480 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
83481 + set_page_attributes(mod->module_init_rx,
83482 + mod->module_init_rx + mod->init_size_rx,
83483 set_memory_ro);
83484 }
83485 }
83486 @@ -1903,16 +1905,19 @@ static void free_module(struct module *mod)
83487
83488 /* This may be NULL, but that's OK */
83489 unset_module_init_ro_nx(mod);
83490 - module_free(mod, mod->module_init);
83491 + module_free(mod, mod->module_init_rw);
83492 + module_free_exec(mod, mod->module_init_rx);
83493 kfree(mod->args);
83494 percpu_modfree(mod);
83495
83496 /* Free lock-classes: */
83497 - lockdep_free_key_range(mod->module_core, mod->core_size);
83498 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
83499 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
83500
83501 /* Finally, free the core (containing the module structure) */
83502 unset_module_core_ro_nx(mod);
83503 - module_free(mod, mod->module_core);
83504 + module_free_exec(mod, mod->module_core_rx);
83505 + module_free(mod, mod->module_core_rw);
83506
83507 #ifdef CONFIG_MPU
83508 update_protections(current->mm);
83509 @@ -1982,9 +1987,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
83510 int ret = 0;
83511 const struct kernel_symbol *ksym;
83512
83513 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
83514 + int is_fs_load = 0;
83515 + int register_filesystem_found = 0;
83516 + char *p;
83517 +
83518 + p = strstr(mod->args, "grsec_modharden_fs");
83519 + if (p) {
83520 + char *endptr = p + sizeof("grsec_modharden_fs") - 1;
83521 + /* copy \0 as well */
83522 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
83523 + is_fs_load = 1;
83524 + }
83525 +#endif
83526 +
83527 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
83528 const char *name = info->strtab + sym[i].st_name;
83529
83530 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
83531 + /* it's a real shame this will never get ripped and copied
83532 + upstream! ;(
83533 + */
83534 + if (is_fs_load && !strcmp(name, "register_filesystem"))
83535 + register_filesystem_found = 1;
83536 +#endif
83537 +
83538 switch (sym[i].st_shndx) {
83539 case SHN_COMMON:
83540 /* We compiled with -fno-common. These are not
83541 @@ -2005,7 +2032,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
83542 ksym = resolve_symbol_wait(mod, info, name);
83543 /* Ok if resolved. */
83544 if (ksym && !IS_ERR(ksym)) {
83545 + pax_open_kernel();
83546 sym[i].st_value = ksym->value;
83547 + pax_close_kernel();
83548 break;
83549 }
83550
83551 @@ -2024,11 +2053,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
83552 secbase = (unsigned long)mod_percpu(mod);
83553 else
83554 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
83555 + pax_open_kernel();
83556 sym[i].st_value += secbase;
83557 + pax_close_kernel();
83558 break;
83559 }
83560 }
83561
83562 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
83563 + if (is_fs_load && !register_filesystem_found) {
83564 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
83565 + ret = -EPERM;
83566 + }
83567 +#endif
83568 +
83569 return ret;
83570 }
83571
83572 @@ -2112,22 +2150,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
83573 || s->sh_entsize != ~0UL
83574 || strstarts(sname, ".init"))
83575 continue;
83576 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
83577 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
83578 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
83579 + else
83580 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
83581 pr_debug("\t%s\n", sname);
83582 }
83583 - switch (m) {
83584 - case 0: /* executable */
83585 - mod->core_size = debug_align(mod->core_size);
83586 - mod->core_text_size = mod->core_size;
83587 - break;
83588 - case 1: /* RO: text and ro-data */
83589 - mod->core_size = debug_align(mod->core_size);
83590 - mod->core_ro_size = mod->core_size;
83591 - break;
83592 - case 3: /* whole core */
83593 - mod->core_size = debug_align(mod->core_size);
83594 - break;
83595 - }
83596 }
83597
83598 pr_debug("Init section allocation order:\n");
83599 @@ -2141,23 +2169,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
83600 || s->sh_entsize != ~0UL
83601 || !strstarts(sname, ".init"))
83602 continue;
83603 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
83604 - | INIT_OFFSET_MASK);
83605 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
83606 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
83607 + else
83608 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
83609 + s->sh_entsize |= INIT_OFFSET_MASK;
83610 pr_debug("\t%s\n", sname);
83611 }
83612 - switch (m) {
83613 - case 0: /* executable */
83614 - mod->init_size = debug_align(mod->init_size);
83615 - mod->init_text_size = mod->init_size;
83616 - break;
83617 - case 1: /* RO: text and ro-data */
83618 - mod->init_size = debug_align(mod->init_size);
83619 - mod->init_ro_size = mod->init_size;
83620 - break;
83621 - case 3: /* whole init */
83622 - mod->init_size = debug_align(mod->init_size);
83623 - break;
83624 - }
83625 }
83626 }
83627
83628 @@ -2330,7 +2348,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
83629
83630 /* Put symbol section at end of init part of module. */
83631 symsect->sh_flags |= SHF_ALLOC;
83632 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
83633 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
83634 info->index.sym) | INIT_OFFSET_MASK;
83635 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
83636
83637 @@ -2347,13 +2365,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
83638 }
83639
83640 /* Append room for core symbols at end of core part. */
83641 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
83642 - info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
83643 - mod->core_size += strtab_size;
83644 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
83645 + info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
83646 + mod->core_size_rx += strtab_size;
83647
83648 /* Put string table section at end of init part of module. */
83649 strsect->sh_flags |= SHF_ALLOC;
83650 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
83651 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
83652 info->index.str) | INIT_OFFSET_MASK;
83653 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
83654 }
83655 @@ -2371,12 +2389,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
83656 /* Make sure we get permanent strtab: don't use info->strtab. */
83657 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
83658
83659 + pax_open_kernel();
83660 +
83661 /* Set types up while we still have access to sections. */
83662 for (i = 0; i < mod->num_symtab; i++)
83663 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
83664
83665 - mod->core_symtab = dst = mod->module_core + info->symoffs;
83666 - mod->core_strtab = s = mod->module_core + info->stroffs;
83667 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
83668 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
83669 src = mod->symtab;
83670 for (ndst = i = 0; i < mod->num_symtab; i++) {
83671 if (i == 0 ||
83672 @@ -2388,6 +2408,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
83673 }
83674 }
83675 mod->core_num_syms = ndst;
83676 +
83677 + pax_close_kernel();
83678 }
83679 #else
83680 static inline void layout_symtab(struct module *mod, struct load_info *info)
83681 @@ -2421,17 +2443,33 @@ void * __weak module_alloc(unsigned long size)
83682 return vmalloc_exec(size);
83683 }
83684
83685 -static void *module_alloc_update_bounds(unsigned long size)
83686 +static void *module_alloc_update_bounds_rw(unsigned long size)
83687 {
83688 void *ret = module_alloc(size);
83689
83690 if (ret) {
83691 mutex_lock(&module_mutex);
83692 /* Update module bounds. */
83693 - if ((unsigned long)ret < module_addr_min)
83694 - module_addr_min = (unsigned long)ret;
83695 - if ((unsigned long)ret + size > module_addr_max)
83696 - module_addr_max = (unsigned long)ret + size;
83697 + if ((unsigned long)ret < module_addr_min_rw)
83698 + module_addr_min_rw = (unsigned long)ret;
83699 + if ((unsigned long)ret + size > module_addr_max_rw)
83700 + module_addr_max_rw = (unsigned long)ret + size;
83701 + mutex_unlock(&module_mutex);
83702 + }
83703 + return ret;
83704 +}
83705 +
83706 +static void *module_alloc_update_bounds_rx(unsigned long size)
83707 +{
83708 + void *ret = module_alloc_exec(size);
83709 +
83710 + if (ret) {
83711 + mutex_lock(&module_mutex);
83712 + /* Update module bounds. */
83713 + if ((unsigned long)ret < module_addr_min_rx)
83714 + module_addr_min_rx = (unsigned long)ret;
83715 + if ((unsigned long)ret + size > module_addr_max_rx)
83716 + module_addr_max_rx = (unsigned long)ret + size;
83717 mutex_unlock(&module_mutex);
83718 }
83719 return ret;
83720 @@ -2706,8 +2744,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
83721 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
83722 {
83723 const char *modmagic = get_modinfo(info, "vermagic");
83724 + const char *license = get_modinfo(info, "license");
83725 int err;
83726
83727 +#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
83728 + if (!license || !license_is_gpl_compatible(license))
83729 + return -ENOEXEC;
83730 +#endif
83731 +
83732 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
83733 modmagic = NULL;
83734
83735 @@ -2733,7 +2777,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
83736 }
83737
83738 /* Set up license info based on the info section */
83739 - set_license(mod, get_modinfo(info, "license"));
83740 + set_license(mod, license);
83741
83742 return 0;
83743 }
83744 @@ -2814,7 +2858,7 @@ static int move_module(struct module *mod, struct load_info *info)
83745 void *ptr;
83746
83747 /* Do the allocs. */
83748 - ptr = module_alloc_update_bounds(mod->core_size);
83749 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
83750 /*
83751 * The pointer to this block is stored in the module structure
83752 * which is inside the block. Just mark it as not being a
83753 @@ -2824,11 +2868,11 @@ static int move_module(struct module *mod, struct load_info *info)
83754 if (!ptr)
83755 return -ENOMEM;
83756
83757 - memset(ptr, 0, mod->core_size);
83758 - mod->module_core = ptr;
83759 + memset(ptr, 0, mod->core_size_rw);
83760 + mod->module_core_rw = ptr;
83761
83762 - if (mod->init_size) {
83763 - ptr = module_alloc_update_bounds(mod->init_size);
83764 + if (mod->init_size_rw) {
83765 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
83766 /*
83767 * The pointer to this block is stored in the module structure
83768 * which is inside the block. This block doesn't need to be
83769 @@ -2837,13 +2881,45 @@ static int move_module(struct module *mod, struct load_info *info)
83770 */
83771 kmemleak_ignore(ptr);
83772 if (!ptr) {
83773 - module_free(mod, mod->module_core);
83774 + module_free(mod, mod->module_core_rw);
83775 return -ENOMEM;
83776 }
83777 - memset(ptr, 0, mod->init_size);
83778 - mod->module_init = ptr;
83779 + memset(ptr, 0, mod->init_size_rw);
83780 + mod->module_init_rw = ptr;
83781 } else
83782 - mod->module_init = NULL;
83783 + mod->module_init_rw = NULL;
83784 +
83785 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
83786 + kmemleak_not_leak(ptr);
83787 + if (!ptr) {
83788 + if (mod->module_init_rw)
83789 + module_free(mod, mod->module_init_rw);
83790 + module_free(mod, mod->module_core_rw);
83791 + return -ENOMEM;
83792 + }
83793 +
83794 + pax_open_kernel();
83795 + memset(ptr, 0, mod->core_size_rx);
83796 + pax_close_kernel();
83797 + mod->module_core_rx = ptr;
83798 +
83799 + if (mod->init_size_rx) {
83800 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
83801 + kmemleak_ignore(ptr);
83802 + if (!ptr && mod->init_size_rx) {
83803 + module_free_exec(mod, mod->module_core_rx);
83804 + if (mod->module_init_rw)
83805 + module_free(mod, mod->module_init_rw);
83806 + module_free(mod, mod->module_core_rw);
83807 + return -ENOMEM;
83808 + }
83809 +
83810 + pax_open_kernel();
83811 + memset(ptr, 0, mod->init_size_rx);
83812 + pax_close_kernel();
83813 + mod->module_init_rx = ptr;
83814 + } else
83815 + mod->module_init_rx = NULL;
83816
83817 /* Transfer each section which specifies SHF_ALLOC */
83818 pr_debug("final section addresses:\n");
83819 @@ -2854,16 +2930,45 @@ static int move_module(struct module *mod, struct load_info *info)
83820 if (!(shdr->sh_flags & SHF_ALLOC))
83821 continue;
83822
83823 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
83824 - dest = mod->module_init
83825 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
83826 - else
83827 - dest = mod->module_core + shdr->sh_entsize;
83828 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
83829 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
83830 + dest = mod->module_init_rw
83831 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
83832 + else
83833 + dest = mod->module_init_rx
83834 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
83835 + } else {
83836 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
83837 + dest = mod->module_core_rw + shdr->sh_entsize;
83838 + else
83839 + dest = mod->module_core_rx + shdr->sh_entsize;
83840 + }
83841 +
83842 + if (shdr->sh_type != SHT_NOBITS) {
83843 +
83844 +#ifdef CONFIG_PAX_KERNEXEC
83845 +#ifdef CONFIG_X86_64
83846 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
83847 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
83848 +#endif
83849 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
83850 + pax_open_kernel();
83851 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
83852 + pax_close_kernel();
83853 + } else
83854 +#endif
83855
83856 - if (shdr->sh_type != SHT_NOBITS)
83857 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
83858 + }
83859 /* Update sh_addr to point to copy in image. */
83860 - shdr->sh_addr = (unsigned long)dest;
83861 +
83862 +#ifdef CONFIG_PAX_KERNEXEC
83863 + if (shdr->sh_flags & SHF_EXECINSTR)
83864 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
83865 + else
83866 +#endif
83867 +
83868 + shdr->sh_addr = (unsigned long)dest;
83869 pr_debug("\t0x%lx %s\n",
83870 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
83871 }
83872 @@ -2920,12 +3025,12 @@ static void flush_module_icache(const struct module *mod)
83873 * Do it before processing of module parameters, so the module
83874 * can provide parameter accessor functions of its own.
83875 */
83876 - if (mod->module_init)
83877 - flush_icache_range((unsigned long)mod->module_init,
83878 - (unsigned long)mod->module_init
83879 - + mod->init_size);
83880 - flush_icache_range((unsigned long)mod->module_core,
83881 - (unsigned long)mod->module_core + mod->core_size);
83882 + if (mod->module_init_rx)
83883 + flush_icache_range((unsigned long)mod->module_init_rx,
83884 + (unsigned long)mod->module_init_rx
83885 + + mod->init_size_rx);
83886 + flush_icache_range((unsigned long)mod->module_core_rx,
83887 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
83888
83889 set_fs(old_fs);
83890 }
83891 @@ -2982,8 +3087,10 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
83892 static void module_deallocate(struct module *mod, struct load_info *info)
83893 {
83894 percpu_modfree(mod);
83895 - module_free(mod, mod->module_init);
83896 - module_free(mod, mod->module_core);
83897 + module_free_exec(mod, mod->module_init_rx);
83898 + module_free_exec(mod, mod->module_core_rx);
83899 + module_free(mod, mod->module_init_rw);
83900 + module_free(mod, mod->module_core_rw);
83901 }
83902
83903 int __weak module_finalize(const Elf_Ehdr *hdr,
83904 @@ -2996,7 +3103,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
83905 static int post_relocation(struct module *mod, const struct load_info *info)
83906 {
83907 /* Sort exception table now relocations are done. */
83908 + pax_open_kernel();
83909 sort_extable(mod->extable, mod->extable + mod->num_exentries);
83910 + pax_close_kernel();
83911
83912 /* Copy relocated percpu area over. */
83913 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
83914 @@ -3050,16 +3159,16 @@ static int do_init_module(struct module *mod)
83915 MODULE_STATE_COMING, mod);
83916
83917 /* Set RO and NX regions for core */
83918 - set_section_ro_nx(mod->module_core,
83919 - mod->core_text_size,
83920 - mod->core_ro_size,
83921 - mod->core_size);
83922 + set_section_ro_nx(mod->module_core_rx,
83923 + mod->core_size_rx,
83924 + mod->core_size_rx,
83925 + mod->core_size_rx);
83926
83927 /* Set RO and NX regions for init */
83928 - set_section_ro_nx(mod->module_init,
83929 - mod->init_text_size,
83930 - mod->init_ro_size,
83931 - mod->init_size);
83932 + set_section_ro_nx(mod->module_init_rx,
83933 + mod->init_size_rx,
83934 + mod->init_size_rx,
83935 + mod->init_size_rx);
83936
83937 do_mod_ctors(mod);
83938 /* Start the module */
83939 @@ -3121,11 +3230,12 @@ static int do_init_module(struct module *mod)
83940 mod->strtab = mod->core_strtab;
83941 #endif
83942 unset_module_init_ro_nx(mod);
83943 - module_free(mod, mod->module_init);
83944 - mod->module_init = NULL;
83945 - mod->init_size = 0;
83946 - mod->init_ro_size = 0;
83947 - mod->init_text_size = 0;
83948 + module_free(mod, mod->module_init_rw);
83949 + module_free_exec(mod, mod->module_init_rx);
83950 + mod->module_init_rw = NULL;
83951 + mod->module_init_rx = NULL;
83952 + mod->init_size_rw = 0;
83953 + mod->init_size_rx = 0;
83954 mutex_unlock(&module_mutex);
83955 wake_up_all(&module_wq);
83956
83957 @@ -3269,9 +3379,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
83958 if (err)
83959 goto free_unload;
83960
83961 + /* Now copy in args */
83962 + mod->args = strndup_user(uargs, ~0UL >> 1);
83963 + if (IS_ERR(mod->args)) {
83964 + err = PTR_ERR(mod->args);
83965 + goto free_unload;
83966 + }
83967 +
83968 /* Set up MODINFO_ATTR fields */
83969 setup_modinfo(mod, info);
83970
83971 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
83972 + {
83973 + char *p, *p2;
83974 +
83975 + if (strstr(mod->args, "grsec_modharden_netdev")) {
83976 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
83977 + err = -EPERM;
83978 + goto free_modinfo;
83979 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
83980 + p += sizeof("grsec_modharden_normal") - 1;
83981 + p2 = strstr(p, "_");
83982 + if (p2) {
83983 + *p2 = '\0';
83984 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
83985 + *p2 = '_';
83986 + }
83987 + err = -EPERM;
83988 + goto free_modinfo;
83989 + }
83990 + }
83991 +#endif
83992 +
83993 /* Fix up syms, so that st_value is a pointer to location. */
83994 err = simplify_symbols(mod, info);
83995 if (err < 0)
83996 @@ -3287,13 +3426,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
83997
83998 flush_module_icache(mod);
83999
84000 - /* Now copy in args */
84001 - mod->args = strndup_user(uargs, ~0UL >> 1);
84002 - if (IS_ERR(mod->args)) {
84003 - err = PTR_ERR(mod->args);
84004 - goto free_arch_cleanup;
84005 - }
84006 -
84007 dynamic_debug_setup(info->debug, info->num_debug);
84008
84009 /* Finally it's fully formed, ready to start executing. */
84010 @@ -3328,11 +3460,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
84011 ddebug_cleanup:
84012 dynamic_debug_remove(info->debug);
84013 synchronize_sched();
84014 - kfree(mod->args);
84015 - free_arch_cleanup:
84016 module_arch_cleanup(mod);
84017 free_modinfo:
84018 free_modinfo(mod);
84019 + kfree(mod->args);
84020 free_unload:
84021 module_unload_free(mod);
84022 unlink_mod:
84023 @@ -3415,10 +3546,16 @@ static const char *get_ksymbol(struct module *mod,
84024 unsigned long nextval;
84025
84026 /* At worse, next value is at end of module */
84027 - if (within_module_init(addr, mod))
84028 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
84029 + if (within_module_init_rx(addr, mod))
84030 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
84031 + else if (within_module_init_rw(addr, mod))
84032 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
84033 + else if (within_module_core_rx(addr, mod))
84034 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
84035 + else if (within_module_core_rw(addr, mod))
84036 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
84037 else
84038 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
84039 + return NULL;
84040
84041 /* Scan for closest preceding symbol, and next symbol. (ELF
84042 starts real symbols at 1). */
84043 @@ -3669,7 +3806,7 @@ static int m_show(struct seq_file *m, void *p)
84044 return 0;
84045
84046 seq_printf(m, "%s %u",
84047 - mod->name, mod->init_size + mod->core_size);
84048 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
84049 print_unload_info(m, mod);
84050
84051 /* Informative for users. */
84052 @@ -3678,7 +3815,7 @@ static int m_show(struct seq_file *m, void *p)
84053 mod->state == MODULE_STATE_COMING ? "Loading":
84054 "Live");
84055 /* Used by oprofile and other similar tools. */
84056 - seq_printf(m, " 0x%pK", mod->module_core);
84057 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
84058
84059 /* Taints info */
84060 if (mod->taints)
84061 @@ -3714,7 +3851,17 @@ static const struct file_operations proc_modules_operations = {
84062
84063 static int __init proc_modules_init(void)
84064 {
84065 +#ifndef CONFIG_GRKERNSEC_HIDESYM
84066 +#ifdef CONFIG_GRKERNSEC_PROC_USER
84067 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
84068 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
84069 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
84070 +#else
84071 proc_create("modules", 0, NULL, &proc_modules_operations);
84072 +#endif
84073 +#else
84074 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
84075 +#endif
84076 return 0;
84077 }
84078 module_init(proc_modules_init);
84079 @@ -3775,14 +3922,14 @@ struct module *__module_address(unsigned long addr)
84080 {
84081 struct module *mod;
84082
84083 - if (addr < module_addr_min || addr > module_addr_max)
84084 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
84085 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
84086 return NULL;
84087
84088 list_for_each_entry_rcu(mod, &modules, list) {
84089 if (mod->state == MODULE_STATE_UNFORMED)
84090 continue;
84091 - if (within_module_core(addr, mod)
84092 - || within_module_init(addr, mod))
84093 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
84094 return mod;
84095 }
84096 return NULL;
84097 @@ -3817,11 +3964,20 @@ bool is_module_text_address(unsigned long addr)
84098 */
84099 struct module *__module_text_address(unsigned long addr)
84100 {
84101 - struct module *mod = __module_address(addr);
84102 + struct module *mod;
84103 +
84104 +#ifdef CONFIG_X86_32
84105 + addr = ktla_ktva(addr);
84106 +#endif
84107 +
84108 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
84109 + return NULL;
84110 +
84111 + mod = __module_address(addr);
84112 +
84113 if (mod) {
84114 /* Make sure it's within the text section. */
84115 - if (!within(addr, mod->module_init, mod->init_text_size)
84116 - && !within(addr, mod->module_core, mod->core_text_size))
84117 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
84118 mod = NULL;
84119 }
84120 return mod;
84121 diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
84122 index 7e3443f..b2a1e6b 100644
84123 --- a/kernel/mutex-debug.c
84124 +++ b/kernel/mutex-debug.c
84125 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
84126 }
84127
84128 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
84129 - struct thread_info *ti)
84130 + struct task_struct *task)
84131 {
84132 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
84133
84134 /* Mark the current thread as blocked on the lock: */
84135 - ti->task->blocked_on = waiter;
84136 + task->blocked_on = waiter;
84137 }
84138
84139 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
84140 - struct thread_info *ti)
84141 + struct task_struct *task)
84142 {
84143 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
84144 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
84145 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
84146 - ti->task->blocked_on = NULL;
84147 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
84148 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
84149 + task->blocked_on = NULL;
84150
84151 list_del_init(&waiter->list);
84152 waiter->task = NULL;
84153 diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
84154 index 0799fd3..d06ae3b 100644
84155 --- a/kernel/mutex-debug.h
84156 +++ b/kernel/mutex-debug.h
84157 @@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
84158 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
84159 extern void debug_mutex_add_waiter(struct mutex *lock,
84160 struct mutex_waiter *waiter,
84161 - struct thread_info *ti);
84162 + struct task_struct *task);
84163 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
84164 - struct thread_info *ti);
84165 + struct task_struct *task);
84166 extern void debug_mutex_unlock(struct mutex *lock);
84167 extern void debug_mutex_init(struct mutex *lock, const char *name,
84168 struct lock_class_key *key);
84169 diff --git a/kernel/mutex.c b/kernel/mutex.c
84170 index d24105b..15648eb 100644
84171 --- a/kernel/mutex.c
84172 +++ b/kernel/mutex.c
84173 @@ -135,7 +135,7 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
84174 node->locked = 1;
84175 return;
84176 }
84177 - ACCESS_ONCE(prev->next) = node;
84178 + ACCESS_ONCE_RW(prev->next) = node;
84179 smp_wmb();
84180 /* Wait until the lock holder passes the lock down */
84181 while (!ACCESS_ONCE(node->locked))
84182 @@ -156,7 +156,7 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
84183 while (!(next = ACCESS_ONCE(node->next)))
84184 arch_mutex_cpu_relax();
84185 }
84186 - ACCESS_ONCE(next->locked) = 1;
84187 + ACCESS_ONCE_RW(next->locked) = 1;
84188 smp_wmb();
84189 }
84190
84191 @@ -520,7 +520,7 @@ slowpath:
84192 goto skip_wait;
84193
84194 debug_mutex_lock_common(lock, &waiter);
84195 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
84196 + debug_mutex_add_waiter(lock, &waiter, task);
84197
84198 /* add waiting tasks to the end of the waitqueue (FIFO): */
84199 list_add_tail(&waiter.list, &lock->wait_list);
84200 @@ -564,7 +564,7 @@ slowpath:
84201 schedule_preempt_disabled();
84202 spin_lock_mutex(&lock->wait_lock, flags);
84203 }
84204 - mutex_remove_waiter(lock, &waiter, current_thread_info());
84205 + mutex_remove_waiter(lock, &waiter, task);
84206 /* set it to 0 if there are no waiters left: */
84207 if (likely(list_empty(&lock->wait_list)))
84208 atomic_set(&lock->count, 0);
84209 @@ -601,7 +601,7 @@ skip_wait:
84210 return 0;
84211
84212 err:
84213 - mutex_remove_waiter(lock, &waiter, task_thread_info(task));
84214 + mutex_remove_waiter(lock, &waiter, task);
84215 spin_unlock_mutex(&lock->wait_lock, flags);
84216 debug_mutex_free_waiter(&waiter);
84217 mutex_release(&lock->dep_map, 1, ip);
84218 diff --git a/kernel/notifier.c b/kernel/notifier.c
84219 index 2d5cc4c..d9ea600 100644
84220 --- a/kernel/notifier.c
84221 +++ b/kernel/notifier.c
84222 @@ -5,6 +5,7 @@
84223 #include <linux/rcupdate.h>
84224 #include <linux/vmalloc.h>
84225 #include <linux/reboot.h>
84226 +#include <linux/mm.h>
84227
84228 /*
84229 * Notifier list for kernel code which wants to be called
84230 @@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
84231 while ((*nl) != NULL) {
84232 if (n->priority > (*nl)->priority)
84233 break;
84234 - nl = &((*nl)->next);
84235 + nl = (struct notifier_block **)&((*nl)->next);
84236 }
84237 - n->next = *nl;
84238 + pax_open_kernel();
84239 + *(const void **)&n->next = *nl;
84240 rcu_assign_pointer(*nl, n);
84241 + pax_close_kernel();
84242 return 0;
84243 }
84244
84245 @@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
84246 return 0;
84247 if (n->priority > (*nl)->priority)
84248 break;
84249 - nl = &((*nl)->next);
84250 + nl = (struct notifier_block **)&((*nl)->next);
84251 }
84252 - n->next = *nl;
84253 + pax_open_kernel();
84254 + *(const void **)&n->next = *nl;
84255 rcu_assign_pointer(*nl, n);
84256 + pax_close_kernel();
84257 return 0;
84258 }
84259
84260 @@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
84261 {
84262 while ((*nl) != NULL) {
84263 if ((*nl) == n) {
84264 + pax_open_kernel();
84265 rcu_assign_pointer(*nl, n->next);
84266 + pax_close_kernel();
84267 return 0;
84268 }
84269 - nl = &((*nl)->next);
84270 + nl = (struct notifier_block **)&((*nl)->next);
84271 }
84272 return -ENOENT;
84273 }
84274 diff --git a/kernel/panic.c b/kernel/panic.c
84275 index b6c482c..5578061 100644
84276 --- a/kernel/panic.c
84277 +++ b/kernel/panic.c
84278 @@ -407,7 +407,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
84279 disable_trace_on_warning();
84280
84281 pr_warn("------------[ cut here ]------------\n");
84282 - pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pS()\n",
84283 + pr_warn("WARNING: CPU: %d PID: %d at %s:%d %pA()\n",
84284 raw_smp_processor_id(), current->pid, file, line, caller);
84285
84286 if (args)
84287 @@ -461,7 +461,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
84288 */
84289 void __stack_chk_fail(void)
84290 {
84291 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
84292 + dump_stack();
84293 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
84294 __builtin_return_address(0));
84295 }
84296 EXPORT_SYMBOL(__stack_chk_fail);
84297 diff --git a/kernel/pid.c b/kernel/pid.c
84298 index 9b9a266..c20ef80 100644
84299 --- a/kernel/pid.c
84300 +++ b/kernel/pid.c
84301 @@ -33,6 +33,7 @@
84302 #include <linux/rculist.h>
84303 #include <linux/bootmem.h>
84304 #include <linux/hash.h>
84305 +#include <linux/security.h>
84306 #include <linux/pid_namespace.h>
84307 #include <linux/init_task.h>
84308 #include <linux/syscalls.h>
84309 @@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
84310
84311 int pid_max = PID_MAX_DEFAULT;
84312
84313 -#define RESERVED_PIDS 300
84314 +#define RESERVED_PIDS 500
84315
84316 int pid_max_min = RESERVED_PIDS + 1;
84317 int pid_max_max = PID_MAX_LIMIT;
84318 @@ -445,10 +446,18 @@ EXPORT_SYMBOL(pid_task);
84319 */
84320 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
84321 {
84322 + struct task_struct *task;
84323 +
84324 rcu_lockdep_assert(rcu_read_lock_held(),
84325 "find_task_by_pid_ns() needs rcu_read_lock()"
84326 " protection");
84327 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
84328 +
84329 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
84330 +
84331 + if (gr_pid_is_chrooted(task))
84332 + return NULL;
84333 +
84334 + return task;
84335 }
84336
84337 struct task_struct *find_task_by_vpid(pid_t vnr)
84338 @@ -456,6 +465,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
84339 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
84340 }
84341
84342 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
84343 +{
84344 + rcu_lockdep_assert(rcu_read_lock_held(),
84345 + "find_task_by_pid_ns() needs rcu_read_lock()"
84346 + " protection");
84347 + return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
84348 +}
84349 +
84350 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
84351 {
84352 struct pid *pid;
84353 diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
84354 index 4208655..19f36a5 100644
84355 --- a/kernel/pid_namespace.c
84356 +++ b/kernel/pid_namespace.c
84357 @@ -247,7 +247,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
84358 void __user *buffer, size_t *lenp, loff_t *ppos)
84359 {
84360 struct pid_namespace *pid_ns = task_active_pid_ns(current);
84361 - struct ctl_table tmp = *table;
84362 + ctl_table_no_const tmp = *table;
84363
84364 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
84365 return -EPERM;
84366 diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
84367 index c7f31aa..2b44977 100644
84368 --- a/kernel/posix-cpu-timers.c
84369 +++ b/kernel/posix-cpu-timers.c
84370 @@ -1521,14 +1521,14 @@ struct k_clock clock_posix_cpu = {
84371
84372 static __init int init_posix_cpu_timers(void)
84373 {
84374 - struct k_clock process = {
84375 + static struct k_clock process = {
84376 .clock_getres = process_cpu_clock_getres,
84377 .clock_get = process_cpu_clock_get,
84378 .timer_create = process_cpu_timer_create,
84379 .nsleep = process_cpu_nsleep,
84380 .nsleep_restart = process_cpu_nsleep_restart,
84381 };
84382 - struct k_clock thread = {
84383 + static struct k_clock thread = {
84384 .clock_getres = thread_cpu_clock_getres,
84385 .clock_get = thread_cpu_clock_get,
84386 .timer_create = thread_cpu_timer_create,
84387 diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
84388 index 424c2d4..679242f 100644
84389 --- a/kernel/posix-timers.c
84390 +++ b/kernel/posix-timers.c
84391 @@ -43,6 +43,7 @@
84392 #include <linux/hash.h>
84393 #include <linux/posix-clock.h>
84394 #include <linux/posix-timers.h>
84395 +#include <linux/grsecurity.h>
84396 #include <linux/syscalls.h>
84397 #include <linux/wait.h>
84398 #include <linux/workqueue.h>
84399 @@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
84400 * which we beg off on and pass to do_sys_settimeofday().
84401 */
84402
84403 -static struct k_clock posix_clocks[MAX_CLOCKS];
84404 +static struct k_clock *posix_clocks[MAX_CLOCKS];
84405
84406 /*
84407 * These ones are defined below.
84408 @@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
84409 */
84410 static __init int init_posix_timers(void)
84411 {
84412 - struct k_clock clock_realtime = {
84413 + static struct k_clock clock_realtime = {
84414 .clock_getres = hrtimer_get_res,
84415 .clock_get = posix_clock_realtime_get,
84416 .clock_set = posix_clock_realtime_set,
84417 @@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
84418 .timer_get = common_timer_get,
84419 .timer_del = common_timer_del,
84420 };
84421 - struct k_clock clock_monotonic = {
84422 + static struct k_clock clock_monotonic = {
84423 .clock_getres = hrtimer_get_res,
84424 .clock_get = posix_ktime_get_ts,
84425 .nsleep = common_nsleep,
84426 @@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
84427 .timer_get = common_timer_get,
84428 .timer_del = common_timer_del,
84429 };
84430 - struct k_clock clock_monotonic_raw = {
84431 + static struct k_clock clock_monotonic_raw = {
84432 .clock_getres = hrtimer_get_res,
84433 .clock_get = posix_get_monotonic_raw,
84434 };
84435 - struct k_clock clock_realtime_coarse = {
84436 + static struct k_clock clock_realtime_coarse = {
84437 .clock_getres = posix_get_coarse_res,
84438 .clock_get = posix_get_realtime_coarse,
84439 };
84440 - struct k_clock clock_monotonic_coarse = {
84441 + static struct k_clock clock_monotonic_coarse = {
84442 .clock_getres = posix_get_coarse_res,
84443 .clock_get = posix_get_monotonic_coarse,
84444 };
84445 - struct k_clock clock_tai = {
84446 + static struct k_clock clock_tai = {
84447 .clock_getres = hrtimer_get_res,
84448 .clock_get = posix_get_tai,
84449 .nsleep = common_nsleep,
84450 @@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
84451 .timer_get = common_timer_get,
84452 .timer_del = common_timer_del,
84453 };
84454 - struct k_clock clock_boottime = {
84455 + static struct k_clock clock_boottime = {
84456 .clock_getres = hrtimer_get_res,
84457 .clock_get = posix_get_boottime,
84458 .nsleep = common_nsleep,
84459 @@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
84460 return;
84461 }
84462
84463 - posix_clocks[clock_id] = *new_clock;
84464 + posix_clocks[clock_id] = new_clock;
84465 }
84466 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
84467
84468 @@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
84469 return (id & CLOCKFD_MASK) == CLOCKFD ?
84470 &clock_posix_dynamic : &clock_posix_cpu;
84471
84472 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
84473 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
84474 return NULL;
84475 - return &posix_clocks[id];
84476 + return posix_clocks[id];
84477 }
84478
84479 static int common_timer_create(struct k_itimer *new_timer)
84480 @@ -597,7 +598,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
84481 struct k_clock *kc = clockid_to_kclock(which_clock);
84482 struct k_itimer *new_timer;
84483 int error, new_timer_id;
84484 - sigevent_t event;
84485 + sigevent_t event = { };
84486 int it_id_set = IT_ID_NOT_SET;
84487
84488 if (!kc)
84489 @@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
84490 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
84491 return -EFAULT;
84492
84493 + /* only the CLOCK_REALTIME clock can be set, all other clocks
84494 + have their clock_set fptr set to a nosettime dummy function
84495 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
84496 + call common_clock_set, which calls do_sys_settimeofday, which
84497 + we hook
84498 + */
84499 +
84500 return kc->clock_set(which_clock, &new_tp);
84501 }
84502
84503 diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
84504 index d444c4e..bc3de51 100644
84505 --- a/kernel/power/Kconfig
84506 +++ b/kernel/power/Kconfig
84507 @@ -24,6 +24,8 @@ config HIBERNATE_CALLBACKS
84508 config HIBERNATION
84509 bool "Hibernation (aka 'suspend to disk')"
84510 depends on SWAP && ARCH_HIBERNATION_POSSIBLE
84511 + depends on !GRKERNSEC_KMEM
84512 + depends on !PAX_MEMORY_SANITIZE
84513 select HIBERNATE_CALLBACKS
84514 select LZO_COMPRESS
84515 select LZO_DECOMPRESS
84516 diff --git a/kernel/power/process.c b/kernel/power/process.c
84517 index 06ec886..9dba35e 100644
84518 --- a/kernel/power/process.c
84519 +++ b/kernel/power/process.c
84520 @@ -34,6 +34,7 @@ static int try_to_freeze_tasks(bool user_only)
84521 unsigned int elapsed_msecs;
84522 bool wakeup = false;
84523 int sleep_usecs = USEC_PER_MSEC;
84524 + bool timedout = false;
84525
84526 do_gettimeofday(&start);
84527
84528 @@ -44,13 +45,20 @@ static int try_to_freeze_tasks(bool user_only)
84529
84530 while (true) {
84531 todo = 0;
84532 + if (time_after(jiffies, end_time))
84533 + timedout = true;
84534 read_lock(&tasklist_lock);
84535 do_each_thread(g, p) {
84536 if (p == current || !freeze_task(p))
84537 continue;
84538
84539 - if (!freezer_should_skip(p))
84540 + if (!freezer_should_skip(p)) {
84541 todo++;
84542 + if (timedout) {
84543 + printk(KERN_ERR "Task refusing to freeze:\n");
84544 + sched_show_task(p);
84545 + }
84546 + }
84547 } while_each_thread(g, p);
84548 read_unlock(&tasklist_lock);
84549
84550 @@ -59,7 +67,7 @@ static int try_to_freeze_tasks(bool user_only)
84551 todo += wq_busy;
84552 }
84553
84554 - if (!todo || time_after(jiffies, end_time))
84555 + if (!todo || timedout)
84556 break;
84557
84558 if (pm_wakeup_pending()) {
84559 diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
84560 index b4e8500..b457a6c 100644
84561 --- a/kernel/printk/printk.c
84562 +++ b/kernel/printk/printk.c
84563 @@ -385,6 +385,11 @@ static int check_syslog_permissions(int type, bool from_file)
84564 if (from_file && type != SYSLOG_ACTION_OPEN)
84565 return 0;
84566
84567 +#ifdef CONFIG_GRKERNSEC_DMESG
84568 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
84569 + return -EPERM;
84570 +#endif
84571 +
84572 if (syslog_action_restricted(type)) {
84573 if (capable(CAP_SYSLOG))
84574 return 0;
84575 diff --git a/kernel/profile.c b/kernel/profile.c
84576 index 6631e1e..310c266 100644
84577 --- a/kernel/profile.c
84578 +++ b/kernel/profile.c
84579 @@ -37,7 +37,7 @@ struct profile_hit {
84580 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
84581 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
84582
84583 -static atomic_t *prof_buffer;
84584 +static atomic_unchecked_t *prof_buffer;
84585 static unsigned long prof_len, prof_shift;
84586
84587 int prof_on __read_mostly;
84588 @@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
84589 hits[i].pc = 0;
84590 continue;
84591 }
84592 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
84593 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
84594 hits[i].hits = hits[i].pc = 0;
84595 }
84596 }
84597 @@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
84598 * Add the current hit(s) and flush the write-queue out
84599 * to the global buffer:
84600 */
84601 - atomic_add(nr_hits, &prof_buffer[pc]);
84602 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
84603 for (i = 0; i < NR_PROFILE_HIT; ++i) {
84604 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
84605 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
84606 hits[i].pc = hits[i].hits = 0;
84607 }
84608 out:
84609 @@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
84610 {
84611 unsigned long pc;
84612 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
84613 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
84614 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
84615 }
84616 #endif /* !CONFIG_SMP */
84617
84618 @@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
84619 return -EFAULT;
84620 buf++; p++; count--; read++;
84621 }
84622 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
84623 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
84624 if (copy_to_user(buf, (void *)pnt, count))
84625 return -EFAULT;
84626 read += count;
84627 @@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
84628 }
84629 #endif
84630 profile_discard_flip_buffers();
84631 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
84632 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
84633 return count;
84634 }
84635
84636 diff --git a/kernel/ptrace.c b/kernel/ptrace.c
84637 index 1f4bcb3..99cf7ab 100644
84638 --- a/kernel/ptrace.c
84639 +++ b/kernel/ptrace.c
84640 @@ -327,7 +327,7 @@ static int ptrace_attach(struct task_struct *task, long request,
84641 if (seize)
84642 flags |= PT_SEIZED;
84643 rcu_read_lock();
84644 - if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
84645 + if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
84646 flags |= PT_PTRACE_CAP;
84647 rcu_read_unlock();
84648 task->ptrace = flags;
84649 @@ -538,7 +538,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
84650 break;
84651 return -EIO;
84652 }
84653 - if (copy_to_user(dst, buf, retval))
84654 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
84655 return -EFAULT;
84656 copied += retval;
84657 src += retval;
84658 @@ -806,7 +806,7 @@ int ptrace_request(struct task_struct *child, long request,
84659 bool seized = child->ptrace & PT_SEIZED;
84660 int ret = -EIO;
84661 siginfo_t siginfo, *si;
84662 - void __user *datavp = (void __user *) data;
84663 + void __user *datavp = (__force void __user *) data;
84664 unsigned long __user *datalp = datavp;
84665 unsigned long flags;
84666
84667 @@ -1052,14 +1052,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
84668 goto out;
84669 }
84670
84671 + if (gr_handle_ptrace(child, request)) {
84672 + ret = -EPERM;
84673 + goto out_put_task_struct;
84674 + }
84675 +
84676 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
84677 ret = ptrace_attach(child, request, addr, data);
84678 /*
84679 * Some architectures need to do book-keeping after
84680 * a ptrace attach.
84681 */
84682 - if (!ret)
84683 + if (!ret) {
84684 arch_ptrace_attach(child);
84685 + gr_audit_ptrace(child);
84686 + }
84687 goto out_put_task_struct;
84688 }
84689
84690 @@ -1087,7 +1094,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
84691 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
84692 if (copied != sizeof(tmp))
84693 return -EIO;
84694 - return put_user(tmp, (unsigned long __user *)data);
84695 + return put_user(tmp, (__force unsigned long __user *)data);
84696 }
84697
84698 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
84699 @@ -1181,7 +1188,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
84700 }
84701
84702 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
84703 - compat_long_t addr, compat_long_t data)
84704 + compat_ulong_t addr, compat_ulong_t data)
84705 {
84706 struct task_struct *child;
84707 long ret;
84708 @@ -1197,14 +1204,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
84709 goto out;
84710 }
84711
84712 + if (gr_handle_ptrace(child, request)) {
84713 + ret = -EPERM;
84714 + goto out_put_task_struct;
84715 + }
84716 +
84717 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
84718 ret = ptrace_attach(child, request, addr, data);
84719 /*
84720 * Some architectures need to do book-keeping after
84721 * a ptrace attach.
84722 */
84723 - if (!ret)
84724 + if (!ret) {
84725 arch_ptrace_attach(child);
84726 + gr_audit_ptrace(child);
84727 + }
84728 goto out_put_task_struct;
84729 }
84730
84731 diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
84732 index b02a339..ce2d20c 100644
84733 --- a/kernel/rcupdate.c
84734 +++ b/kernel/rcupdate.c
84735 @@ -312,10 +312,10 @@ int rcu_jiffies_till_stall_check(void)
84736 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
84737 */
84738 if (till_stall_check < 3) {
84739 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
84740 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
84741 till_stall_check = 3;
84742 } else if (till_stall_check > 300) {
84743 - ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
84744 + ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
84745 till_stall_check = 300;
84746 }
84747 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
84748 diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
84749 index 9ed6075..c9e8a63 100644
84750 --- a/kernel/rcutiny.c
84751 +++ b/kernel/rcutiny.c
84752 @@ -45,7 +45,7 @@
84753 /* Forward declarations for rcutiny_plugin.h. */
84754 struct rcu_ctrlblk;
84755 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
84756 -static void rcu_process_callbacks(struct softirq_action *unused);
84757 +static void rcu_process_callbacks(void);
84758 static void __call_rcu(struct rcu_head *head,
84759 void (*func)(struct rcu_head *rcu),
84760 struct rcu_ctrlblk *rcp);
84761 @@ -309,7 +309,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
84762 false));
84763 }
84764
84765 -static void rcu_process_callbacks(struct softirq_action *unused)
84766 +static __latent_entropy void rcu_process_callbacks(void)
84767 {
84768 __rcu_process_callbacks(&rcu_sched_ctrlblk);
84769 __rcu_process_callbacks(&rcu_bh_ctrlblk);
84770 diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
84771 index be63101..05cf721 100644
84772 --- a/kernel/rcutorture.c
84773 +++ b/kernel/rcutorture.c
84774 @@ -170,12 +170,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
84775 { 0 };
84776 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
84777 { 0 };
84778 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
84779 -static atomic_t n_rcu_torture_alloc;
84780 -static atomic_t n_rcu_torture_alloc_fail;
84781 -static atomic_t n_rcu_torture_free;
84782 -static atomic_t n_rcu_torture_mberror;
84783 -static atomic_t n_rcu_torture_error;
84784 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
84785 +static atomic_unchecked_t n_rcu_torture_alloc;
84786 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
84787 +static atomic_unchecked_t n_rcu_torture_free;
84788 +static atomic_unchecked_t n_rcu_torture_mberror;
84789 +static atomic_unchecked_t n_rcu_torture_error;
84790 static long n_rcu_torture_barrier_error;
84791 static long n_rcu_torture_boost_ktrerror;
84792 static long n_rcu_torture_boost_rterror;
84793 @@ -293,11 +293,11 @@ rcu_torture_alloc(void)
84794
84795 spin_lock_bh(&rcu_torture_lock);
84796 if (list_empty(&rcu_torture_freelist)) {
84797 - atomic_inc(&n_rcu_torture_alloc_fail);
84798 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
84799 spin_unlock_bh(&rcu_torture_lock);
84800 return NULL;
84801 }
84802 - atomic_inc(&n_rcu_torture_alloc);
84803 + atomic_inc_unchecked(&n_rcu_torture_alloc);
84804 p = rcu_torture_freelist.next;
84805 list_del_init(p);
84806 spin_unlock_bh(&rcu_torture_lock);
84807 @@ -310,7 +310,7 @@ rcu_torture_alloc(void)
84808 static void
84809 rcu_torture_free(struct rcu_torture *p)
84810 {
84811 - atomic_inc(&n_rcu_torture_free);
84812 + atomic_inc_unchecked(&n_rcu_torture_free);
84813 spin_lock_bh(&rcu_torture_lock);
84814 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
84815 spin_unlock_bh(&rcu_torture_lock);
84816 @@ -431,7 +431,7 @@ rcu_torture_cb(struct rcu_head *p)
84817 i = rp->rtort_pipe_count;
84818 if (i > RCU_TORTURE_PIPE_LEN)
84819 i = RCU_TORTURE_PIPE_LEN;
84820 - atomic_inc(&rcu_torture_wcount[i]);
84821 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
84822 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
84823 rp->rtort_mbtest = 0;
84824 rcu_torture_free(rp);
84825 @@ -821,7 +821,7 @@ rcu_torture_writer(void *arg)
84826 i = old_rp->rtort_pipe_count;
84827 if (i > RCU_TORTURE_PIPE_LEN)
84828 i = RCU_TORTURE_PIPE_LEN;
84829 - atomic_inc(&rcu_torture_wcount[i]);
84830 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
84831 old_rp->rtort_pipe_count++;
84832 if (gp_normal == gp_exp)
84833 exp = !!(rcu_random(&rand) & 0x80);
84834 @@ -839,7 +839,7 @@ rcu_torture_writer(void *arg)
84835 i = rp->rtort_pipe_count;
84836 if (i > RCU_TORTURE_PIPE_LEN)
84837 i = RCU_TORTURE_PIPE_LEN;
84838 - atomic_inc(&rcu_torture_wcount[i]);
84839 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
84840 if (++rp->rtort_pipe_count >=
84841 RCU_TORTURE_PIPE_LEN) {
84842 rp->rtort_mbtest = 0;
84843 @@ -938,7 +938,7 @@ static void rcu_torture_timer(unsigned long unused)
84844 return;
84845 }
84846 if (p->rtort_mbtest == 0)
84847 - atomic_inc(&n_rcu_torture_mberror);
84848 + atomic_inc_unchecked(&n_rcu_torture_mberror);
84849 spin_lock(&rand_lock);
84850 cur_ops->read_delay(&rand);
84851 n_rcu_torture_timers++;
84852 @@ -1008,7 +1008,7 @@ rcu_torture_reader(void *arg)
84853 continue;
84854 }
84855 if (p->rtort_mbtest == 0)
84856 - atomic_inc(&n_rcu_torture_mberror);
84857 + atomic_inc_unchecked(&n_rcu_torture_mberror);
84858 cur_ops->read_delay(&rand);
84859 preempt_disable();
84860 pipe_count = p->rtort_pipe_count;
84861 @@ -1071,11 +1071,11 @@ rcu_torture_printk(char *page)
84862 rcu_torture_current,
84863 rcu_torture_current_version,
84864 list_empty(&rcu_torture_freelist),
84865 - atomic_read(&n_rcu_torture_alloc),
84866 - atomic_read(&n_rcu_torture_alloc_fail),
84867 - atomic_read(&n_rcu_torture_free));
84868 + atomic_read_unchecked(&n_rcu_torture_alloc),
84869 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
84870 + atomic_read_unchecked(&n_rcu_torture_free));
84871 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
84872 - atomic_read(&n_rcu_torture_mberror),
84873 + atomic_read_unchecked(&n_rcu_torture_mberror),
84874 n_rcu_torture_boost_ktrerror,
84875 n_rcu_torture_boost_rterror);
84876 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
84877 @@ -1094,14 +1094,14 @@ rcu_torture_printk(char *page)
84878 n_barrier_attempts,
84879 n_rcu_torture_barrier_error);
84880 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
84881 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
84882 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
84883 n_rcu_torture_barrier_error != 0 ||
84884 n_rcu_torture_boost_ktrerror != 0 ||
84885 n_rcu_torture_boost_rterror != 0 ||
84886 n_rcu_torture_boost_failure != 0 ||
84887 i > 1) {
84888 cnt += sprintf(&page[cnt], "!!! ");
84889 - atomic_inc(&n_rcu_torture_error);
84890 + atomic_inc_unchecked(&n_rcu_torture_error);
84891 WARN_ON_ONCE(1);
84892 }
84893 cnt += sprintf(&page[cnt], "Reader Pipe: ");
84894 @@ -1115,7 +1115,7 @@ rcu_torture_printk(char *page)
84895 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
84896 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
84897 cnt += sprintf(&page[cnt], " %d",
84898 - atomic_read(&rcu_torture_wcount[i]));
84899 + atomic_read_unchecked(&rcu_torture_wcount[i]));
84900 }
84901 cnt += sprintf(&page[cnt], "\n");
84902 if (cur_ops->stats)
84903 @@ -1830,7 +1830,7 @@ rcu_torture_cleanup(void)
84904
84905 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
84906
84907 - if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
84908 + if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
84909 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
84910 else if (n_online_successes != n_online_attempts ||
84911 n_offline_successes != n_offline_attempts)
84912 @@ -1952,18 +1952,18 @@ rcu_torture_init(void)
84913
84914 rcu_torture_current = NULL;
84915 rcu_torture_current_version = 0;
84916 - atomic_set(&n_rcu_torture_alloc, 0);
84917 - atomic_set(&n_rcu_torture_alloc_fail, 0);
84918 - atomic_set(&n_rcu_torture_free, 0);
84919 - atomic_set(&n_rcu_torture_mberror, 0);
84920 - atomic_set(&n_rcu_torture_error, 0);
84921 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
84922 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
84923 + atomic_set_unchecked(&n_rcu_torture_free, 0);
84924 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
84925 + atomic_set_unchecked(&n_rcu_torture_error, 0);
84926 n_rcu_torture_barrier_error = 0;
84927 n_rcu_torture_boost_ktrerror = 0;
84928 n_rcu_torture_boost_rterror = 0;
84929 n_rcu_torture_boost_failure = 0;
84930 n_rcu_torture_boosts = 0;
84931 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
84932 - atomic_set(&rcu_torture_wcount[i], 0);
84933 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
84934 for_each_possible_cpu(cpu) {
84935 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
84936 per_cpu(rcu_torture_count, cpu)[i] = 0;
84937 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
84938 index 32618b3..c1fb822 100644
84939 --- a/kernel/rcutree.c
84940 +++ b/kernel/rcutree.c
84941 @@ -382,9 +382,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
84942 rcu_prepare_for_idle(smp_processor_id());
84943 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
84944 smp_mb__before_atomic_inc(); /* See above. */
84945 - atomic_inc(&rdtp->dynticks);
84946 + atomic_inc_unchecked(&rdtp->dynticks);
84947 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
84948 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
84949 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
84950
84951 /*
84952 * It is illegal to enter an extended quiescent state while
84953 @@ -501,10 +501,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
84954 int user)
84955 {
84956 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
84957 - atomic_inc(&rdtp->dynticks);
84958 + atomic_inc_unchecked(&rdtp->dynticks);
84959 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
84960 smp_mb__after_atomic_inc(); /* See above. */
84961 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
84962 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
84963 rcu_cleanup_after_idle(smp_processor_id());
84964 trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
84965 if (!user && !is_idle_task(current)) {
84966 @@ -623,14 +623,14 @@ void rcu_nmi_enter(void)
84967 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
84968
84969 if (rdtp->dynticks_nmi_nesting == 0 &&
84970 - (atomic_read(&rdtp->dynticks) & 0x1))
84971 + (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
84972 return;
84973 rdtp->dynticks_nmi_nesting++;
84974 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
84975 - atomic_inc(&rdtp->dynticks);
84976 + atomic_inc_unchecked(&rdtp->dynticks);
84977 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
84978 smp_mb__after_atomic_inc(); /* See above. */
84979 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
84980 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
84981 }
84982
84983 /**
84984 @@ -649,9 +649,9 @@ void rcu_nmi_exit(void)
84985 return;
84986 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
84987 smp_mb__before_atomic_inc(); /* See above. */
84988 - atomic_inc(&rdtp->dynticks);
84989 + atomic_inc_unchecked(&rdtp->dynticks);
84990 smp_mb__after_atomic_inc(); /* Force delay to next write. */
84991 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
84992 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
84993 }
84994
84995 /**
84996 @@ -665,7 +665,7 @@ int rcu_is_cpu_idle(void)
84997 int ret;
84998
84999 preempt_disable();
85000 - ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
85001 + ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
85002 preempt_enable();
85003 return ret;
85004 }
85005 @@ -734,7 +734,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
85006 static int dyntick_save_progress_counter(struct rcu_data *rdp,
85007 bool *isidle, unsigned long *maxj)
85008 {
85009 - rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
85010 + rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
85011 rcu_sysidle_check_cpu(rdp, isidle, maxj);
85012 return (rdp->dynticks_snap & 0x1) == 0;
85013 }
85014 @@ -751,7 +751,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
85015 unsigned int curr;
85016 unsigned int snap;
85017
85018 - curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
85019 + curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
85020 snap = (unsigned int)rdp->dynticks_snap;
85021
85022 /*
85023 @@ -1341,9 +1341,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
85024 rdp = this_cpu_ptr(rsp->rda);
85025 rcu_preempt_check_blocked_tasks(rnp);
85026 rnp->qsmask = rnp->qsmaskinit;
85027 - ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
85028 + ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
85029 WARN_ON_ONCE(rnp->completed != rsp->completed);
85030 - ACCESS_ONCE(rnp->completed) = rsp->completed;
85031 + ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
85032 if (rnp == rdp->mynode)
85033 __note_gp_changes(rsp, rnp, rdp);
85034 rcu_preempt_boost_start_gp(rnp);
85035 @@ -1434,7 +1434,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
85036 */
85037 rcu_for_each_node_breadth_first(rsp, rnp) {
85038 raw_spin_lock_irq(&rnp->lock);
85039 - ACCESS_ONCE(rnp->completed) = rsp->gpnum;
85040 + ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
85041 rdp = this_cpu_ptr(rsp->rda);
85042 if (rnp == rdp->mynode)
85043 __note_gp_changes(rsp, rnp, rdp);
85044 @@ -1766,7 +1766,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
85045 rsp->qlen += rdp->qlen;
85046 rdp->n_cbs_orphaned += rdp->qlen;
85047 rdp->qlen_lazy = 0;
85048 - ACCESS_ONCE(rdp->qlen) = 0;
85049 + ACCESS_ONCE_RW(rdp->qlen) = 0;
85050 }
85051
85052 /*
85053 @@ -2012,7 +2012,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
85054 }
85055 smp_mb(); /* List handling before counting for rcu_barrier(). */
85056 rdp->qlen_lazy -= count_lazy;
85057 - ACCESS_ONCE(rdp->qlen) -= count;
85058 + ACCESS_ONCE_RW(rdp->qlen) -= count;
85059 rdp->n_cbs_invoked += count;
85060
85061 /* Reinstate batch limit if we have worked down the excess. */
85062 @@ -2209,7 +2209,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
85063 /*
85064 * Do RCU core processing for the current CPU.
85065 */
85066 -static void rcu_process_callbacks(struct softirq_action *unused)
85067 +static __latent_entropy void rcu_process_callbacks(void)
85068 {
85069 struct rcu_state *rsp;
85070
85071 @@ -2316,7 +2316,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
85072 WARN_ON_ONCE((unsigned long)head & 0x3); /* Misaligned rcu_head! */
85073 if (debug_rcu_head_queue(head)) {
85074 /* Probable double call_rcu(), so leak the callback. */
85075 - ACCESS_ONCE(head->func) = rcu_leak_callback;
85076 + ACCESS_ONCE_RW(head->func) = rcu_leak_callback;
85077 WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
85078 return;
85079 }
85080 @@ -2344,7 +2344,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
85081 local_irq_restore(flags);
85082 return;
85083 }
85084 - ACCESS_ONCE(rdp->qlen)++;
85085 + ACCESS_ONCE_RW(rdp->qlen)++;
85086 if (lazy)
85087 rdp->qlen_lazy++;
85088 else
85089 @@ -2553,11 +2553,11 @@ void synchronize_sched_expedited(void)
85090 * counter wrap on a 32-bit system. Quite a few more CPUs would of
85091 * course be required on a 64-bit system.
85092 */
85093 - if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
85094 + if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
85095 (ulong)atomic_long_read(&rsp->expedited_done) +
85096 ULONG_MAX / 8)) {
85097 synchronize_sched();
85098 - atomic_long_inc(&rsp->expedited_wrap);
85099 + atomic_long_inc_unchecked(&rsp->expedited_wrap);
85100 return;
85101 }
85102
85103 @@ -2565,7 +2565,7 @@ void synchronize_sched_expedited(void)
85104 * Take a ticket. Note that atomic_inc_return() implies a
85105 * full memory barrier.
85106 */
85107 - snap = atomic_long_inc_return(&rsp->expedited_start);
85108 + snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
85109 firstsnap = snap;
85110 get_online_cpus();
85111 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
85112 @@ -2578,14 +2578,14 @@ void synchronize_sched_expedited(void)
85113 synchronize_sched_expedited_cpu_stop,
85114 NULL) == -EAGAIN) {
85115 put_online_cpus();
85116 - atomic_long_inc(&rsp->expedited_tryfail);
85117 + atomic_long_inc_unchecked(&rsp->expedited_tryfail);
85118
85119 /* Check to see if someone else did our work for us. */
85120 s = atomic_long_read(&rsp->expedited_done);
85121 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
85122 /* ensure test happens before caller kfree */
85123 smp_mb__before_atomic_inc(); /* ^^^ */
85124 - atomic_long_inc(&rsp->expedited_workdone1);
85125 + atomic_long_inc_unchecked(&rsp->expedited_workdone1);
85126 return;
85127 }
85128
85129 @@ -2594,7 +2594,7 @@ void synchronize_sched_expedited(void)
85130 udelay(trycount * num_online_cpus());
85131 } else {
85132 wait_rcu_gp(call_rcu_sched);
85133 - atomic_long_inc(&rsp->expedited_normal);
85134 + atomic_long_inc_unchecked(&rsp->expedited_normal);
85135 return;
85136 }
85137
85138 @@ -2603,7 +2603,7 @@ void synchronize_sched_expedited(void)
85139 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
85140 /* ensure test happens before caller kfree */
85141 smp_mb__before_atomic_inc(); /* ^^^ */
85142 - atomic_long_inc(&rsp->expedited_workdone2);
85143 + atomic_long_inc_unchecked(&rsp->expedited_workdone2);
85144 return;
85145 }
85146
85147 @@ -2615,10 +2615,10 @@ void synchronize_sched_expedited(void)
85148 * period works for us.
85149 */
85150 get_online_cpus();
85151 - snap = atomic_long_read(&rsp->expedited_start);
85152 + snap = atomic_long_read_unchecked(&rsp->expedited_start);
85153 smp_mb(); /* ensure read is before try_stop_cpus(). */
85154 }
85155 - atomic_long_inc(&rsp->expedited_stoppedcpus);
85156 + atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
85157
85158 /*
85159 * Everyone up to our most recent fetch is covered by our grace
85160 @@ -2627,16 +2627,16 @@ void synchronize_sched_expedited(void)
85161 * than we did already did their update.
85162 */
85163 do {
85164 - atomic_long_inc(&rsp->expedited_done_tries);
85165 + atomic_long_inc_unchecked(&rsp->expedited_done_tries);
85166 s = atomic_long_read(&rsp->expedited_done);
85167 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
85168 /* ensure test happens before caller kfree */
85169 smp_mb__before_atomic_inc(); /* ^^^ */
85170 - atomic_long_inc(&rsp->expedited_done_lost);
85171 + atomic_long_inc_unchecked(&rsp->expedited_done_lost);
85172 break;
85173 }
85174 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
85175 - atomic_long_inc(&rsp->expedited_done_exit);
85176 + atomic_long_inc_unchecked(&rsp->expedited_done_exit);
85177
85178 put_online_cpus();
85179 }
85180 @@ -2829,7 +2829,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
85181 * ACCESS_ONCE() to prevent the compiler from speculating
85182 * the increment to precede the early-exit check.
85183 */
85184 - ACCESS_ONCE(rsp->n_barrier_done)++;
85185 + ACCESS_ONCE_RW(rsp->n_barrier_done)++;
85186 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
85187 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
85188 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
85189 @@ -2879,7 +2879,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
85190
85191 /* Increment ->n_barrier_done to prevent duplicate work. */
85192 smp_mb(); /* Keep increment after above mechanism. */
85193 - ACCESS_ONCE(rsp->n_barrier_done)++;
85194 + ACCESS_ONCE_RW(rsp->n_barrier_done)++;
85195 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
85196 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
85197 smp_mb(); /* Keep increment before caller's subsequent code. */
85198 @@ -2924,10 +2924,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
85199 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
85200 init_callback_list(rdp);
85201 rdp->qlen_lazy = 0;
85202 - ACCESS_ONCE(rdp->qlen) = 0;
85203 + ACCESS_ONCE_RW(rdp->qlen) = 0;
85204 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
85205 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
85206 - WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
85207 + WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
85208 rdp->cpu = cpu;
85209 rdp->rsp = rsp;
85210 rcu_boot_init_nocb_percpu_data(rdp);
85211 @@ -2961,8 +2961,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
85212 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
85213 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
85214 rcu_sysidle_init_percpu_data(rdp->dynticks);
85215 - atomic_set(&rdp->dynticks->dynticks,
85216 - (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
85217 + atomic_set_unchecked(&rdp->dynticks->dynticks,
85218 + (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
85219 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
85220
85221 /* Add CPU to rcu_node bitmasks. */
85222 diff --git a/kernel/rcutree.h b/kernel/rcutree.h
85223 index 5f97eab..db8f687 100644
85224 --- a/kernel/rcutree.h
85225 +++ b/kernel/rcutree.h
85226 @@ -87,11 +87,11 @@ struct rcu_dynticks {
85227 long long dynticks_nesting; /* Track irq/process nesting level. */
85228 /* Process level is worth LLONG_MAX/2. */
85229 int dynticks_nmi_nesting; /* Track NMI nesting level. */
85230 - atomic_t dynticks; /* Even value for idle, else odd. */
85231 + atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
85232 #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
85233 long long dynticks_idle_nesting;
85234 /* irq/process nesting level from idle. */
85235 - atomic_t dynticks_idle; /* Even value for idle, else odd. */
85236 + atomic_unchecked_t dynticks_idle; /* Even value for idle, else odd. */
85237 /* "Idle" excludes userspace execution. */
85238 unsigned long dynticks_idle_jiffies;
85239 /* End of last non-NMI non-idle period. */
85240 @@ -427,17 +427,17 @@ struct rcu_state {
85241 /* _rcu_barrier(). */
85242 /* End of fields guarded by barrier_mutex. */
85243
85244 - atomic_long_t expedited_start; /* Starting ticket. */
85245 - atomic_long_t expedited_done; /* Done ticket. */
85246 - atomic_long_t expedited_wrap; /* # near-wrap incidents. */
85247 - atomic_long_t expedited_tryfail; /* # acquisition failures. */
85248 - atomic_long_t expedited_workdone1; /* # done by others #1. */
85249 - atomic_long_t expedited_workdone2; /* # done by others #2. */
85250 - atomic_long_t expedited_normal; /* # fallbacks to normal. */
85251 - atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
85252 - atomic_long_t expedited_done_tries; /* # tries to update _done. */
85253 - atomic_long_t expedited_done_lost; /* # times beaten to _done. */
85254 - atomic_long_t expedited_done_exit; /* # times exited _done loop. */
85255 + atomic_long_unchecked_t expedited_start; /* Starting ticket. */
85256 + atomic_long_t expedited_done; /* Done ticket. */
85257 + atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
85258 + atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
85259 + atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
85260 + atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
85261 + atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
85262 + atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
85263 + atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
85264 + atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
85265 + atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
85266
85267 unsigned long jiffies_force_qs; /* Time at which to invoke */
85268 /* force_quiescent_state(). */
85269 diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
85270 index 130c97b..bcbe7f7 100644
85271 --- a/kernel/rcutree_plugin.h
85272 +++ b/kernel/rcutree_plugin.h
85273 @@ -744,7 +744,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
85274 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
85275 {
85276 return !rcu_preempted_readers_exp(rnp) &&
85277 - ACCESS_ONCE(rnp->expmask) == 0;
85278 + ACCESS_ONCE_RW(rnp->expmask) == 0;
85279 }
85280
85281 /*
85282 @@ -900,7 +900,7 @@ void synchronize_rcu_expedited(void)
85283
85284 /* Clean up and exit. */
85285 smp_mb(); /* ensure expedited GP seen before counter increment. */
85286 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
85287 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
85288 unlock_mb_ret:
85289 mutex_unlock(&sync_rcu_preempt_exp_mutex);
85290 mb_ret:
85291 @@ -1474,7 +1474,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
85292 free_cpumask_var(cm);
85293 }
85294
85295 -static struct smp_hotplug_thread rcu_cpu_thread_spec = {
85296 +static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
85297 .store = &rcu_cpu_kthread_task,
85298 .thread_should_run = rcu_cpu_kthread_should_run,
85299 .thread_fn = rcu_cpu_kthread,
85300 @@ -1939,7 +1939,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
85301 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
85302 pr_err("\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
85303 cpu, ticks_value, ticks_title,
85304 - atomic_read(&rdtp->dynticks) & 0xfff,
85305 + atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
85306 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
85307 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
85308 fast_no_hz);
85309 @@ -2102,7 +2102,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
85310
85311 /* Enqueue the callback on the nocb list and update counts. */
85312 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
85313 - ACCESS_ONCE(*old_rhpp) = rhp;
85314 + ACCESS_ONCE_RW(*old_rhpp) = rhp;
85315 atomic_long_add(rhcount, &rdp->nocb_q_count);
85316 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
85317
85318 @@ -2242,12 +2242,12 @@ static int rcu_nocb_kthread(void *arg)
85319 * Extract queued callbacks, update counts, and wait
85320 * for a grace period to elapse.
85321 */
85322 - ACCESS_ONCE(rdp->nocb_head) = NULL;
85323 + ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
85324 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
85325 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
85326 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
85327 - ACCESS_ONCE(rdp->nocb_p_count) += c;
85328 - ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
85329 + ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
85330 + ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
85331 rcu_nocb_wait_gp(rdp);
85332
85333 /* Each pass through the following loop invokes a callback. */
85334 @@ -2269,8 +2269,8 @@ static int rcu_nocb_kthread(void *arg)
85335 list = next;
85336 }
85337 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
85338 - ACCESS_ONCE(rdp->nocb_p_count) -= c;
85339 - ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
85340 + ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
85341 + ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
85342 rdp->n_nocbs_invoked += c;
85343 }
85344 return 0;
85345 @@ -2297,7 +2297,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
85346 t = kthread_run(rcu_nocb_kthread, rdp,
85347 "rcuo%c/%d", rsp->abbr, cpu);
85348 BUG_ON(IS_ERR(t));
85349 - ACCESS_ONCE(rdp->nocb_kthread) = t;
85350 + ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
85351 }
85352 }
85353
85354 @@ -2423,11 +2423,11 @@ static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq)
85355
85356 /* Record start of fully idle period. */
85357 j = jiffies;
85358 - ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
85359 + ACCESS_ONCE_RW(rdtp->dynticks_idle_jiffies) = j;
85360 smp_mb__before_atomic_inc();
85361 - atomic_inc(&rdtp->dynticks_idle);
85362 + atomic_inc_unchecked(&rdtp->dynticks_idle);
85363 smp_mb__after_atomic_inc();
85364 - WARN_ON_ONCE(atomic_read(&rdtp->dynticks_idle) & 0x1);
85365 + WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1);
85366 }
85367
85368 /*
85369 @@ -2492,9 +2492,9 @@ static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq)
85370
85371 /* Record end of idle period. */
85372 smp_mb__before_atomic_inc();
85373 - atomic_inc(&rdtp->dynticks_idle);
85374 + atomic_inc_unchecked(&rdtp->dynticks_idle);
85375 smp_mb__after_atomic_inc();
85376 - WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks_idle) & 0x1));
85377 + WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks_idle) & 0x1));
85378
85379 /*
85380 * If we are the timekeeping CPU, we are permitted to be non-idle
85381 @@ -2535,7 +2535,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
85382 WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu);
85383
85384 /* Pick up current idle and NMI-nesting counter and check. */
85385 - cur = atomic_read(&rdtp->dynticks_idle);
85386 + cur = atomic_read_unchecked(&rdtp->dynticks_idle);
85387 if (cur & 0x1) {
85388 *isidle = false; /* We are not idle! */
85389 return;
85390 @@ -2598,7 +2598,7 @@ static void rcu_sysidle(unsigned long j)
85391 case RCU_SYSIDLE_NOT:
85392
85393 /* First time all are idle, so note a short idle period. */
85394 - ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
85395 + ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_SHORT;
85396 break;
85397
85398 case RCU_SYSIDLE_SHORT:
85399 @@ -2635,7 +2635,7 @@ static void rcu_sysidle(unsigned long j)
85400 static void rcu_sysidle_cancel(void)
85401 {
85402 smp_mb();
85403 - ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
85404 + ACCESS_ONCE_RW(full_sysidle_state) = RCU_SYSIDLE_NOT;
85405 }
85406
85407 /*
85408 @@ -2683,7 +2683,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
85409 smp_mb(); /* grace period precedes setting inuse. */
85410
85411 rshp = container_of(rhp, struct rcu_sysidle_head, rh);
85412 - ACCESS_ONCE(rshp->inuse) = 0;
85413 + ACCESS_ONCE_RW(rshp->inuse) = 0;
85414 }
85415
85416 /*
85417 diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
85418 index cf6c174..a8f4b50 100644
85419 --- a/kernel/rcutree_trace.c
85420 +++ b/kernel/rcutree_trace.c
85421 @@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
85422 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
85423 rdp->passed_quiesce, rdp->qs_pending);
85424 seq_printf(m, " dt=%d/%llx/%d df=%lu",
85425 - atomic_read(&rdp->dynticks->dynticks),
85426 + atomic_read_unchecked(&rdp->dynticks->dynticks),
85427 rdp->dynticks->dynticks_nesting,
85428 rdp->dynticks->dynticks_nmi_nesting,
85429 rdp->dynticks_fqs);
85430 @@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
85431 struct rcu_state *rsp = (struct rcu_state *)m->private;
85432
85433 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
85434 - atomic_long_read(&rsp->expedited_start),
85435 + atomic_long_read_unchecked(&rsp->expedited_start),
85436 atomic_long_read(&rsp->expedited_done),
85437 - atomic_long_read(&rsp->expedited_wrap),
85438 - atomic_long_read(&rsp->expedited_tryfail),
85439 - atomic_long_read(&rsp->expedited_workdone1),
85440 - atomic_long_read(&rsp->expedited_workdone2),
85441 - atomic_long_read(&rsp->expedited_normal),
85442 - atomic_long_read(&rsp->expedited_stoppedcpus),
85443 - atomic_long_read(&rsp->expedited_done_tries),
85444 - atomic_long_read(&rsp->expedited_done_lost),
85445 - atomic_long_read(&rsp->expedited_done_exit));
85446 + atomic_long_read_unchecked(&rsp->expedited_wrap),
85447 + atomic_long_read_unchecked(&rsp->expedited_tryfail),
85448 + atomic_long_read_unchecked(&rsp->expedited_workdone1),
85449 + atomic_long_read_unchecked(&rsp->expedited_workdone2),
85450 + atomic_long_read_unchecked(&rsp->expedited_normal),
85451 + atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
85452 + atomic_long_read_unchecked(&rsp->expedited_done_tries),
85453 + atomic_long_read_unchecked(&rsp->expedited_done_lost),
85454 + atomic_long_read_unchecked(&rsp->expedited_done_exit));
85455 return 0;
85456 }
85457
85458 diff --git a/kernel/resource.c b/kernel/resource.c
85459 index 3f285dc..5755f62 100644
85460 --- a/kernel/resource.c
85461 +++ b/kernel/resource.c
85462 @@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
85463
85464 static int __init ioresources_init(void)
85465 {
85466 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
85467 +#ifdef CONFIG_GRKERNSEC_PROC_USER
85468 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
85469 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
85470 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
85471 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
85472 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
85473 +#endif
85474 +#else
85475 proc_create("ioports", 0, NULL, &proc_ioports_operations);
85476 proc_create("iomem", 0, NULL, &proc_iomem_operations);
85477 +#endif
85478 return 0;
85479 }
85480 __initcall(ioresources_init);
85481 diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
85482 index 1d96dd0..994ff19 100644
85483 --- a/kernel/rtmutex-tester.c
85484 +++ b/kernel/rtmutex-tester.c
85485 @@ -22,7 +22,7 @@
85486 #define MAX_RT_TEST_MUTEXES 8
85487
85488 static spinlock_t rttest_lock;
85489 -static atomic_t rttest_event;
85490 +static atomic_unchecked_t rttest_event;
85491
85492 struct test_thread_data {
85493 int opcode;
85494 @@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85495
85496 case RTTEST_LOCKCONT:
85497 td->mutexes[td->opdata] = 1;
85498 - td->event = atomic_add_return(1, &rttest_event);
85499 + td->event = atomic_add_return_unchecked(1, &rttest_event);
85500 return 0;
85501
85502 case RTTEST_RESET:
85503 @@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85504 return 0;
85505
85506 case RTTEST_RESETEVENT:
85507 - atomic_set(&rttest_event, 0);
85508 + atomic_set_unchecked(&rttest_event, 0);
85509 return 0;
85510
85511 default:
85512 @@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85513 return ret;
85514
85515 td->mutexes[id] = 1;
85516 - td->event = atomic_add_return(1, &rttest_event);
85517 + td->event = atomic_add_return_unchecked(1, &rttest_event);
85518 rt_mutex_lock(&mutexes[id]);
85519 - td->event = atomic_add_return(1, &rttest_event);
85520 + td->event = atomic_add_return_unchecked(1, &rttest_event);
85521 td->mutexes[id] = 4;
85522 return 0;
85523
85524 @@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85525 return ret;
85526
85527 td->mutexes[id] = 1;
85528 - td->event = atomic_add_return(1, &rttest_event);
85529 + td->event = atomic_add_return_unchecked(1, &rttest_event);
85530 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
85531 - td->event = atomic_add_return(1, &rttest_event);
85532 + td->event = atomic_add_return_unchecked(1, &rttest_event);
85533 td->mutexes[id] = ret ? 0 : 4;
85534 return ret ? -EINTR : 0;
85535
85536 @@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
85537 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
85538 return ret;
85539
85540 - td->event = atomic_add_return(1, &rttest_event);
85541 + td->event = atomic_add_return_unchecked(1, &rttest_event);
85542 rt_mutex_unlock(&mutexes[id]);
85543 - td->event = atomic_add_return(1, &rttest_event);
85544 + td->event = atomic_add_return_unchecked(1, &rttest_event);
85545 td->mutexes[id] = 0;
85546 return 0;
85547
85548 @@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
85549 break;
85550
85551 td->mutexes[dat] = 2;
85552 - td->event = atomic_add_return(1, &rttest_event);
85553 + td->event = atomic_add_return_unchecked(1, &rttest_event);
85554 break;
85555
85556 default:
85557 @@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
85558 return;
85559
85560 td->mutexes[dat] = 3;
85561 - td->event = atomic_add_return(1, &rttest_event);
85562 + td->event = atomic_add_return_unchecked(1, &rttest_event);
85563 break;
85564
85565 case RTTEST_LOCKNOWAIT:
85566 @@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
85567 return;
85568
85569 td->mutexes[dat] = 1;
85570 - td->event = atomic_add_return(1, &rttest_event);
85571 + td->event = atomic_add_return_unchecked(1, &rttest_event);
85572 return;
85573
85574 default:
85575 diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
85576 index 4a07353..66b5291 100644
85577 --- a/kernel/sched/auto_group.c
85578 +++ b/kernel/sched/auto_group.c
85579 @@ -11,7 +11,7 @@
85580
85581 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
85582 static struct autogroup autogroup_default;
85583 -static atomic_t autogroup_seq_nr;
85584 +static atomic_unchecked_t autogroup_seq_nr;
85585
85586 void __init autogroup_init(struct task_struct *init_task)
85587 {
85588 @@ -79,7 +79,7 @@ static inline struct autogroup *autogroup_create(void)
85589
85590 kref_init(&ag->kref);
85591 init_rwsem(&ag->lock);
85592 - ag->id = atomic_inc_return(&autogroup_seq_nr);
85593 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
85594 ag->tg = tg;
85595 #ifdef CONFIG_RT_GROUP_SCHED
85596 /*
85597 diff --git a/kernel/sched/core.c b/kernel/sched/core.c
85598 index 5ac63c9..d912786 100644
85599 --- a/kernel/sched/core.c
85600 +++ b/kernel/sched/core.c
85601 @@ -2868,7 +2868,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
85602 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
85603 * or number of jiffies left till timeout) if completed.
85604 */
85605 -long __sched
85606 +long __sched __intentional_overflow(-1)
85607 wait_for_completion_interruptible_timeout(struct completion *x,
85608 unsigned long timeout)
85609 {
85610 @@ -2885,7 +2885,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
85611 *
85612 * Return: -ERESTARTSYS if interrupted, 0 if completed.
85613 */
85614 -int __sched wait_for_completion_killable(struct completion *x)
85615 +int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
85616 {
85617 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
85618 if (t == -ERESTARTSYS)
85619 @@ -2906,7 +2906,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
85620 * Return: -ERESTARTSYS if interrupted, 0 if timed out, positive (at least 1,
85621 * or number of jiffies left till timeout) if completed.
85622 */
85623 -long __sched
85624 +long __sched __intentional_overflow(-1)
85625 wait_for_completion_killable_timeout(struct completion *x,
85626 unsigned long timeout)
85627 {
85628 @@ -3132,6 +3132,8 @@ int can_nice(const struct task_struct *p, const int nice)
85629 /* convert nice value [19,-20] to rlimit style value [1,40] */
85630 int nice_rlim = 20 - nice;
85631
85632 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
85633 +
85634 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
85635 capable(CAP_SYS_NICE));
85636 }
85637 @@ -3165,7 +3167,8 @@ SYSCALL_DEFINE1(nice, int, increment)
85638 if (nice > 19)
85639 nice = 19;
85640
85641 - if (increment < 0 && !can_nice(current, nice))
85642 + if (increment < 0 && (!can_nice(current, nice) ||
85643 + gr_handle_chroot_nice()))
85644 return -EPERM;
85645
85646 retval = security_task_setnice(current, nice);
85647 @@ -3327,6 +3330,7 @@ recheck:
85648 unsigned long rlim_rtprio =
85649 task_rlimit(p, RLIMIT_RTPRIO);
85650
85651 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
85652 /* can't set/change the rt policy */
85653 if (policy != p->policy && !rlim_rtprio)
85654 return -EPERM;
85655 @@ -4456,7 +4460,7 @@ static void migrate_tasks(unsigned int dead_cpu)
85656
85657 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
85658
85659 -static struct ctl_table sd_ctl_dir[] = {
85660 +static ctl_table_no_const sd_ctl_dir[] __read_only = {
85661 {
85662 .procname = "sched_domain",
85663 .mode = 0555,
85664 @@ -4473,17 +4477,17 @@ static struct ctl_table sd_ctl_root[] = {
85665 {}
85666 };
85667
85668 -static struct ctl_table *sd_alloc_ctl_entry(int n)
85669 +static ctl_table_no_const *sd_alloc_ctl_entry(int n)
85670 {
85671 - struct ctl_table *entry =
85672 + ctl_table_no_const *entry =
85673 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
85674
85675 return entry;
85676 }
85677
85678 -static void sd_free_ctl_entry(struct ctl_table **tablep)
85679 +static void sd_free_ctl_entry(ctl_table_no_const *tablep)
85680 {
85681 - struct ctl_table *entry;
85682 + ctl_table_no_const *entry;
85683
85684 /*
85685 * In the intermediate directories, both the child directory and
85686 @@ -4491,22 +4495,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
85687 * will always be set. In the lowest directory the names are
85688 * static strings and all have proc handlers.
85689 */
85690 - for (entry = *tablep; entry->mode; entry++) {
85691 - if (entry->child)
85692 - sd_free_ctl_entry(&entry->child);
85693 + for (entry = tablep; entry->mode; entry++) {
85694 + if (entry->child) {
85695 + sd_free_ctl_entry(entry->child);
85696 + pax_open_kernel();
85697 + entry->child = NULL;
85698 + pax_close_kernel();
85699 + }
85700 if (entry->proc_handler == NULL)
85701 kfree(entry->procname);
85702 }
85703
85704 - kfree(*tablep);
85705 - *tablep = NULL;
85706 + kfree(tablep);
85707 }
85708
85709 static int min_load_idx = 0;
85710 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
85711
85712 static void
85713 -set_table_entry(struct ctl_table *entry,
85714 +set_table_entry(ctl_table_no_const *entry,
85715 const char *procname, void *data, int maxlen,
85716 umode_t mode, proc_handler *proc_handler,
85717 bool load_idx)
85718 @@ -4526,7 +4533,7 @@ set_table_entry(struct ctl_table *entry,
85719 static struct ctl_table *
85720 sd_alloc_ctl_domain_table(struct sched_domain *sd)
85721 {
85722 - struct ctl_table *table = sd_alloc_ctl_entry(13);
85723 + ctl_table_no_const *table = sd_alloc_ctl_entry(13);
85724
85725 if (table == NULL)
85726 return NULL;
85727 @@ -4561,9 +4568,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
85728 return table;
85729 }
85730
85731 -static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu)
85732 +static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
85733 {
85734 - struct ctl_table *entry, *table;
85735 + ctl_table_no_const *entry, *table;
85736 struct sched_domain *sd;
85737 int domain_num = 0, i;
85738 char buf[32];
85739 @@ -4590,11 +4597,13 @@ static struct ctl_table_header *sd_sysctl_header;
85740 static void register_sched_domain_sysctl(void)
85741 {
85742 int i, cpu_num = num_possible_cpus();
85743 - struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
85744 + ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
85745 char buf[32];
85746
85747 WARN_ON(sd_ctl_dir[0].child);
85748 + pax_open_kernel();
85749 sd_ctl_dir[0].child = entry;
85750 + pax_close_kernel();
85751
85752 if (entry == NULL)
85753 return;
85754 @@ -4617,8 +4626,12 @@ static void unregister_sched_domain_sysctl(void)
85755 if (sd_sysctl_header)
85756 unregister_sysctl_table(sd_sysctl_header);
85757 sd_sysctl_header = NULL;
85758 - if (sd_ctl_dir[0].child)
85759 - sd_free_ctl_entry(&sd_ctl_dir[0].child);
85760 + if (sd_ctl_dir[0].child) {
85761 + sd_free_ctl_entry(sd_ctl_dir[0].child);
85762 + pax_open_kernel();
85763 + sd_ctl_dir[0].child = NULL;
85764 + pax_close_kernel();
85765 + }
85766 }
85767 #else
85768 static void register_sched_domain_sysctl(void)
85769 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
85770 index 513fc2f..906a851 100644
85771 --- a/kernel/sched/fair.c
85772 +++ b/kernel/sched/fair.c
85773 @@ -869,7 +869,7 @@ void task_numa_fault(int node, int pages, bool migrated)
85774
85775 static void reset_ptenuma_scan(struct task_struct *p)
85776 {
85777 - ACCESS_ONCE(p->mm->numa_scan_seq)++;
85778 + ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
85779 p->mm->numa_scan_offset = 0;
85780 }
85781
85782 @@ -5840,7 +5840,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
85783 * run_rebalance_domains is triggered when needed from the scheduler tick.
85784 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
85785 */
85786 -static void run_rebalance_domains(struct softirq_action *h)
85787 +static __latent_entropy void run_rebalance_domains(void)
85788 {
85789 int this_cpu = smp_processor_id();
85790 struct rq *this_rq = cpu_rq(this_cpu);
85791 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
85792 index b3c5653..a4d192a 100644
85793 --- a/kernel/sched/sched.h
85794 +++ b/kernel/sched/sched.h
85795 @@ -1004,7 +1004,7 @@ struct sched_class {
85796 #ifdef CONFIG_FAIR_GROUP_SCHED
85797 void (*task_move_group) (struct task_struct *p, int on_rq);
85798 #endif
85799 -};
85800 +} __do_const;
85801
85802 #define sched_class_highest (&stop_sched_class)
85803 #define for_each_class(class) \
85804 diff --git a/kernel/signal.c b/kernel/signal.c
85805 index ded28b9..6886c08 100644
85806 --- a/kernel/signal.c
85807 +++ b/kernel/signal.c
85808 @@ -51,12 +51,12 @@ static struct kmem_cache *sigqueue_cachep;
85809
85810 int print_fatal_signals __read_mostly;
85811
85812 -static void __user *sig_handler(struct task_struct *t, int sig)
85813 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
85814 {
85815 return t->sighand->action[sig - 1].sa.sa_handler;
85816 }
85817
85818 -static int sig_handler_ignored(void __user *handler, int sig)
85819 +static int sig_handler_ignored(__sighandler_t handler, int sig)
85820 {
85821 /* Is it explicitly or implicitly ignored? */
85822 return handler == SIG_IGN ||
85823 @@ -65,7 +65,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
85824
85825 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
85826 {
85827 - void __user *handler;
85828 + __sighandler_t handler;
85829
85830 handler = sig_handler(t, sig);
85831
85832 @@ -369,6 +369,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
85833 atomic_inc(&user->sigpending);
85834 rcu_read_unlock();
85835
85836 + if (!override_rlimit)
85837 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
85838 +
85839 if (override_rlimit ||
85840 atomic_read(&user->sigpending) <=
85841 task_rlimit(t, RLIMIT_SIGPENDING)) {
85842 @@ -496,7 +499,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
85843
85844 int unhandled_signal(struct task_struct *tsk, int sig)
85845 {
85846 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
85847 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
85848 if (is_global_init(tsk))
85849 return 1;
85850 if (handler != SIG_IGN && handler != SIG_DFL)
85851 @@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
85852 }
85853 }
85854
85855 + /* allow glibc communication via tgkill to other threads in our
85856 + thread group */
85857 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
85858 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
85859 + && gr_handle_signal(t, sig))
85860 + return -EPERM;
85861 +
85862 return security_task_kill(t, info, sig, 0);
85863 }
85864
85865 @@ -1199,7 +1209,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
85866 return send_signal(sig, info, p, 1);
85867 }
85868
85869 -static int
85870 +int
85871 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
85872 {
85873 return send_signal(sig, info, t, 0);
85874 @@ -1236,6 +1246,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
85875 unsigned long int flags;
85876 int ret, blocked, ignored;
85877 struct k_sigaction *action;
85878 + int is_unhandled = 0;
85879
85880 spin_lock_irqsave(&t->sighand->siglock, flags);
85881 action = &t->sighand->action[sig-1];
85882 @@ -1250,9 +1261,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
85883 }
85884 if (action->sa.sa_handler == SIG_DFL)
85885 t->signal->flags &= ~SIGNAL_UNKILLABLE;
85886 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
85887 + is_unhandled = 1;
85888 ret = specific_send_sig_info(sig, info, t);
85889 spin_unlock_irqrestore(&t->sighand->siglock, flags);
85890
85891 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
85892 + normal operation */
85893 + if (is_unhandled) {
85894 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
85895 + gr_handle_crash(t, sig);
85896 + }
85897 +
85898 return ret;
85899 }
85900
85901 @@ -1319,8 +1339,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
85902 ret = check_kill_permission(sig, info, p);
85903 rcu_read_unlock();
85904
85905 - if (!ret && sig)
85906 + if (!ret && sig) {
85907 ret = do_send_sig_info(sig, info, p, true);
85908 + if (!ret)
85909 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
85910 + }
85911
85912 return ret;
85913 }
85914 @@ -2926,7 +2949,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
85915 int error = -ESRCH;
85916
85917 rcu_read_lock();
85918 - p = find_task_by_vpid(pid);
85919 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
85920 + /* allow glibc communication via tgkill to other threads in our
85921 + thread group */
85922 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
85923 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
85924 + p = find_task_by_vpid_unrestricted(pid);
85925 + else
85926 +#endif
85927 + p = find_task_by_vpid(pid);
85928 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
85929 error = check_kill_permission(sig, info, p);
85930 /*
85931 @@ -3240,8 +3271,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
85932 }
85933 seg = get_fs();
85934 set_fs(KERNEL_DS);
85935 - ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
85936 - (stack_t __force __user *) &uoss,
85937 + ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
85938 + (stack_t __force_user *) &uoss,
85939 compat_user_stack_pointer());
85940 set_fs(seg);
85941 if (ret >= 0 && uoss_ptr) {
85942 diff --git a/kernel/smpboot.c b/kernel/smpboot.c
85943 index eb89e18..a4e6792 100644
85944 --- a/kernel/smpboot.c
85945 +++ b/kernel/smpboot.c
85946 @@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
85947 }
85948 smpboot_unpark_thread(plug_thread, cpu);
85949 }
85950 - list_add(&plug_thread->list, &hotplug_threads);
85951 + pax_list_add(&plug_thread->list, &hotplug_threads);
85952 out:
85953 mutex_unlock(&smpboot_threads_lock);
85954 return ret;
85955 @@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
85956 {
85957 get_online_cpus();
85958 mutex_lock(&smpboot_threads_lock);
85959 - list_del(&plug_thread->list);
85960 + pax_list_del(&plug_thread->list);
85961 smpboot_destroy_threads(plug_thread);
85962 mutex_unlock(&smpboot_threads_lock);
85963 put_online_cpus();
85964 diff --git a/kernel/softirq.c b/kernel/softirq.c
85965 index d7d498d..94fe0f7 100644
85966 --- a/kernel/softirq.c
85967 +++ b/kernel/softirq.c
85968 @@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
85969 EXPORT_SYMBOL(irq_stat);
85970 #endif
85971
85972 -static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
85973 +static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
85974
85975 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
85976
85977 -char *softirq_to_name[NR_SOFTIRQS] = {
85978 +const char * const softirq_to_name[NR_SOFTIRQS] = {
85979 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
85980 "TASKLET", "SCHED", "HRTIMER", "RCU"
85981 };
85982 @@ -248,7 +248,7 @@ restart:
85983 kstat_incr_softirqs_this_cpu(vec_nr);
85984
85985 trace_softirq_entry(vec_nr);
85986 - h->action(h);
85987 + h->action();
85988 trace_softirq_exit(vec_nr);
85989 if (unlikely(prev_count != preempt_count())) {
85990 printk(KERN_ERR "huh, entered softirq %u %s %p"
85991 @@ -412,7 +412,7 @@ void __raise_softirq_irqoff(unsigned int nr)
85992 or_softirq_pending(1UL << nr);
85993 }
85994
85995 -void open_softirq(int nr, void (*action)(struct softirq_action *))
85996 +void __init open_softirq(int nr, void (*action)(void))
85997 {
85998 softirq_vec[nr].action = action;
85999 }
86000 @@ -468,7 +468,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
86001
86002 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
86003
86004 -static void tasklet_action(struct softirq_action *a)
86005 +static __latent_entropy void tasklet_action(void)
86006 {
86007 struct tasklet_struct *list;
86008
86009 @@ -503,7 +503,7 @@ static void tasklet_action(struct softirq_action *a)
86010 }
86011 }
86012
86013 -static void tasklet_hi_action(struct softirq_action *a)
86014 +static __latent_entropy void tasklet_hi_action(void)
86015 {
86016 struct tasklet_struct *list;
86017
86018 @@ -858,7 +858,7 @@ static struct notifier_block cpu_nfb = {
86019 .notifier_call = cpu_callback
86020 };
86021
86022 -static struct smp_hotplug_thread softirq_threads = {
86023 +static struct smp_hotplug_thread softirq_threads __read_only = {
86024 .store = &ksoftirqd,
86025 .thread_should_run = ksoftirqd_should_run,
86026 .thread_fn = run_ksoftirqd,
86027 diff --git a/kernel/srcu.c b/kernel/srcu.c
86028 index 01d5ccb..cdcbee6 100644
86029 --- a/kernel/srcu.c
86030 +++ b/kernel/srcu.c
86031 @@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
86032
86033 idx = ACCESS_ONCE(sp->completed) & 0x1;
86034 preempt_disable();
86035 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
86036 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
86037 smp_mb(); /* B */ /* Avoid leaking the critical section. */
86038 - ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
86039 + ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
86040 preempt_enable();
86041 return idx;
86042 }
86043 diff --git a/kernel/sys.c b/kernel/sys.c
86044 index c18ecca..b3c2dd2 100644
86045 --- a/kernel/sys.c
86046 +++ b/kernel/sys.c
86047 @@ -149,6 +149,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
86048 error = -EACCES;
86049 goto out;
86050 }
86051 +
86052 + if (gr_handle_chroot_setpriority(p, niceval)) {
86053 + error = -EACCES;
86054 + goto out;
86055 + }
86056 +
86057 no_nice = security_task_setnice(p, niceval);
86058 if (no_nice) {
86059 error = no_nice;
86060 @@ -352,6 +358,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
86061 goto error;
86062 }
86063
86064 + if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
86065 + goto error;
86066 +
86067 if (rgid != (gid_t) -1 ||
86068 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
86069 new->sgid = new->egid;
86070 @@ -387,6 +396,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
86071 old = current_cred();
86072
86073 retval = -EPERM;
86074 +
86075 + if (gr_check_group_change(kgid, kgid, kgid))
86076 + goto error;
86077 +
86078 if (ns_capable(old->user_ns, CAP_SETGID))
86079 new->gid = new->egid = new->sgid = new->fsgid = kgid;
86080 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
86081 @@ -404,7 +417,7 @@ error:
86082 /*
86083 * change the user struct in a credentials set to match the new UID
86084 */
86085 -static int set_user(struct cred *new)
86086 +int set_user(struct cred *new)
86087 {
86088 struct user_struct *new_user;
86089
86090 @@ -484,6 +497,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
86091 goto error;
86092 }
86093
86094 + if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
86095 + goto error;
86096 +
86097 if (!uid_eq(new->uid, old->uid)) {
86098 retval = set_user(new);
86099 if (retval < 0)
86100 @@ -534,6 +550,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
86101 old = current_cred();
86102
86103 retval = -EPERM;
86104 +
86105 + if (gr_check_crash_uid(kuid))
86106 + goto error;
86107 + if (gr_check_user_change(kuid, kuid, kuid))
86108 + goto error;
86109 +
86110 if (ns_capable(old->user_ns, CAP_SETUID)) {
86111 new->suid = new->uid = kuid;
86112 if (!uid_eq(kuid, old->uid)) {
86113 @@ -603,6 +625,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
86114 goto error;
86115 }
86116
86117 + if (gr_check_user_change(kruid, keuid, INVALID_UID))
86118 + goto error;
86119 +
86120 if (ruid != (uid_t) -1) {
86121 new->uid = kruid;
86122 if (!uid_eq(kruid, old->uid)) {
86123 @@ -685,6 +710,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
86124 goto error;
86125 }
86126
86127 + if (gr_check_group_change(krgid, kegid, INVALID_GID))
86128 + goto error;
86129 +
86130 if (rgid != (gid_t) -1)
86131 new->gid = krgid;
86132 if (egid != (gid_t) -1)
86133 @@ -746,12 +774,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
86134 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
86135 ns_capable(old->user_ns, CAP_SETUID)) {
86136 if (!uid_eq(kuid, old->fsuid)) {
86137 + if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
86138 + goto error;
86139 +
86140 new->fsuid = kuid;
86141 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
86142 goto change_okay;
86143 }
86144 }
86145
86146 +error:
86147 abort_creds(new);
86148 return old_fsuid;
86149
86150 @@ -784,12 +816,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
86151 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
86152 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
86153 ns_capable(old->user_ns, CAP_SETGID)) {
86154 + if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
86155 + goto error;
86156 +
86157 if (!gid_eq(kgid, old->fsgid)) {
86158 new->fsgid = kgid;
86159 goto change_okay;
86160 }
86161 }
86162
86163 +error:
86164 abort_creds(new);
86165 return old_fsgid;
86166
86167 @@ -1169,19 +1205,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
86168 return -EFAULT;
86169
86170 down_read(&uts_sem);
86171 - error = __copy_to_user(&name->sysname, &utsname()->sysname,
86172 + error = __copy_to_user(name->sysname, &utsname()->sysname,
86173 __OLD_UTS_LEN);
86174 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
86175 - error |= __copy_to_user(&name->nodename, &utsname()->nodename,
86176 + error |= __copy_to_user(name->nodename, &utsname()->nodename,
86177 __OLD_UTS_LEN);
86178 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
86179 - error |= __copy_to_user(&name->release, &utsname()->release,
86180 + error |= __copy_to_user(name->release, &utsname()->release,
86181 __OLD_UTS_LEN);
86182 error |= __put_user(0, name->release + __OLD_UTS_LEN);
86183 - error |= __copy_to_user(&name->version, &utsname()->version,
86184 + error |= __copy_to_user(name->version, &utsname()->version,
86185 __OLD_UTS_LEN);
86186 error |= __put_user(0, name->version + __OLD_UTS_LEN);
86187 - error |= __copy_to_user(&name->machine, &utsname()->machine,
86188 + error |= __copy_to_user(name->machine, &utsname()->machine,
86189 __OLD_UTS_LEN);
86190 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
86191 up_read(&uts_sem);
86192 @@ -1383,6 +1419,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
86193 */
86194 new_rlim->rlim_cur = 1;
86195 }
86196 + /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
86197 + is changed to a lower value. Since tasks can be created by the same
86198 + user in between this limit change and an execve by this task, force
86199 + a recheck only for this task by setting PF_NPROC_EXCEEDED
86200 + */
86201 + if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
86202 + tsk->flags |= PF_NPROC_EXCEEDED;
86203 }
86204 if (!retval) {
86205 if (old_rlim)
86206 diff --git a/kernel/sysctl.c b/kernel/sysctl.c
86207 index b2f06f3..e6354ab 100644
86208 --- a/kernel/sysctl.c
86209 +++ b/kernel/sysctl.c
86210 @@ -93,7 +93,6 @@
86211
86212
86213 #if defined(CONFIG_SYSCTL)
86214 -
86215 /* External variables not in a header file. */
86216 extern int sysctl_overcommit_memory;
86217 extern int sysctl_overcommit_ratio;
86218 @@ -119,17 +118,18 @@ extern int blk_iopoll_enabled;
86219
86220 /* Constants used for minimum and maximum */
86221 #ifdef CONFIG_LOCKUP_DETECTOR
86222 -static int sixty = 60;
86223 +static int sixty __read_only = 60;
86224 #endif
86225
86226 -static int zero;
86227 -static int __maybe_unused one = 1;
86228 -static int __maybe_unused two = 2;
86229 -static int __maybe_unused three = 3;
86230 -static unsigned long one_ul = 1;
86231 -static int one_hundred = 100;
86232 +static int neg_one __read_only = -1;
86233 +static int zero __read_only = 0;
86234 +static int __maybe_unused one __read_only = 1;
86235 +static int __maybe_unused two __read_only = 2;
86236 +static int __maybe_unused three __read_only = 3;
86237 +static unsigned long one_ul __read_only = 1;
86238 +static int one_hundred __read_only = 100;
86239 #ifdef CONFIG_PRINTK
86240 -static int ten_thousand = 10000;
86241 +static int ten_thousand __read_only = 10000;
86242 #endif
86243
86244 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
86245 @@ -176,10 +176,8 @@ static int proc_taint(struct ctl_table *table, int write,
86246 void __user *buffer, size_t *lenp, loff_t *ppos);
86247 #endif
86248
86249 -#ifdef CONFIG_PRINTK
86250 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
86251 void __user *buffer, size_t *lenp, loff_t *ppos);
86252 -#endif
86253
86254 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
86255 void __user *buffer, size_t *lenp, loff_t *ppos);
86256 @@ -210,6 +208,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
86257
86258 #endif
86259
86260 +extern struct ctl_table grsecurity_table[];
86261 +
86262 static struct ctl_table kern_table[];
86263 static struct ctl_table vm_table[];
86264 static struct ctl_table fs_table[];
86265 @@ -224,6 +224,20 @@ extern struct ctl_table epoll_table[];
86266 int sysctl_legacy_va_layout;
86267 #endif
86268
86269 +#ifdef CONFIG_PAX_SOFTMODE
86270 +static ctl_table pax_table[] = {
86271 + {
86272 + .procname = "softmode",
86273 + .data = &pax_softmode,
86274 + .maxlen = sizeof(unsigned int),
86275 + .mode = 0600,
86276 + .proc_handler = &proc_dointvec,
86277 + },
86278 +
86279 + { }
86280 +};
86281 +#endif
86282 +
86283 /* The default sysctl tables: */
86284
86285 static struct ctl_table sysctl_base_table[] = {
86286 @@ -272,6 +286,22 @@ static int max_extfrag_threshold = 1000;
86287 #endif
86288
86289 static struct ctl_table kern_table[] = {
86290 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
86291 + {
86292 + .procname = "grsecurity",
86293 + .mode = 0500,
86294 + .child = grsecurity_table,
86295 + },
86296 +#endif
86297 +
86298 +#ifdef CONFIG_PAX_SOFTMODE
86299 + {
86300 + .procname = "pax",
86301 + .mode = 0500,
86302 + .child = pax_table,
86303 + },
86304 +#endif
86305 +
86306 {
86307 .procname = "sched_child_runs_first",
86308 .data = &sysctl_sched_child_runs_first,
86309 @@ -613,7 +643,7 @@ static struct ctl_table kern_table[] = {
86310 .data = &modprobe_path,
86311 .maxlen = KMOD_PATH_LEN,
86312 .mode = 0644,
86313 - .proc_handler = proc_dostring,
86314 + .proc_handler = proc_dostring_modpriv,
86315 },
86316 {
86317 .procname = "modules_disabled",
86318 @@ -780,16 +810,20 @@ static struct ctl_table kern_table[] = {
86319 .extra1 = &zero,
86320 .extra2 = &one,
86321 },
86322 +#endif
86323 {
86324 .procname = "kptr_restrict",
86325 .data = &kptr_restrict,
86326 .maxlen = sizeof(int),
86327 .mode = 0644,
86328 .proc_handler = proc_dointvec_minmax_sysadmin,
86329 +#ifdef CONFIG_GRKERNSEC_HIDESYM
86330 + .extra1 = &two,
86331 +#else
86332 .extra1 = &zero,
86333 +#endif
86334 .extra2 = &two,
86335 },
86336 -#endif
86337 {
86338 .procname = "ngroups_max",
86339 .data = &ngroups_max,
86340 @@ -1031,10 +1065,17 @@ static struct ctl_table kern_table[] = {
86341 */
86342 {
86343 .procname = "perf_event_paranoid",
86344 - .data = &sysctl_perf_event_paranoid,
86345 - .maxlen = sizeof(sysctl_perf_event_paranoid),
86346 + .data = &sysctl_perf_event_legitimately_concerned,
86347 + .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
86348 .mode = 0644,
86349 - .proc_handler = proc_dointvec,
86350 + /* go ahead, be a hero */
86351 + .proc_handler = proc_dointvec_minmax_sysadmin,
86352 + .extra1 = &neg_one,
86353 +#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
86354 + .extra2 = &three,
86355 +#else
86356 + .extra2 = &two,
86357 +#endif
86358 },
86359 {
86360 .procname = "perf_event_mlock_kb",
86361 @@ -1297,6 +1338,13 @@ static struct ctl_table vm_table[] = {
86362 .proc_handler = proc_dointvec_minmax,
86363 .extra1 = &zero,
86364 },
86365 + {
86366 + .procname = "heap_stack_gap",
86367 + .data = &sysctl_heap_stack_gap,
86368 + .maxlen = sizeof(sysctl_heap_stack_gap),
86369 + .mode = 0644,
86370 + .proc_handler = proc_doulongvec_minmax,
86371 + },
86372 #else
86373 {
86374 .procname = "nr_trim_pages",
86375 @@ -1761,6 +1809,16 @@ int proc_dostring(struct ctl_table *table, int write,
86376 buffer, lenp, ppos);
86377 }
86378
86379 +int proc_dostring_modpriv(struct ctl_table *table, int write,
86380 + void __user *buffer, size_t *lenp, loff_t *ppos)
86381 +{
86382 + if (write && !capable(CAP_SYS_MODULE))
86383 + return -EPERM;
86384 +
86385 + return _proc_do_string(table->data, table->maxlen, write,
86386 + buffer, lenp, ppos);
86387 +}
86388 +
86389 static size_t proc_skip_spaces(char **buf)
86390 {
86391 size_t ret;
86392 @@ -1866,6 +1924,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
86393 len = strlen(tmp);
86394 if (len > *size)
86395 len = *size;
86396 + if (len > sizeof(tmp))
86397 + len = sizeof(tmp);
86398 if (copy_to_user(*buf, tmp, len))
86399 return -EFAULT;
86400 *size -= len;
86401 @@ -2030,7 +2090,7 @@ int proc_dointvec(struct ctl_table *table, int write,
86402 static int proc_taint(struct ctl_table *table, int write,
86403 void __user *buffer, size_t *lenp, loff_t *ppos)
86404 {
86405 - struct ctl_table t;
86406 + ctl_table_no_const t;
86407 unsigned long tmptaint = get_taint();
86408 int err;
86409
86410 @@ -2058,7 +2118,6 @@ static int proc_taint(struct ctl_table *table, int write,
86411 return err;
86412 }
86413
86414 -#ifdef CONFIG_PRINTK
86415 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
86416 void __user *buffer, size_t *lenp, loff_t *ppos)
86417 {
86418 @@ -2067,7 +2126,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
86419
86420 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
86421 }
86422 -#endif
86423
86424 struct do_proc_dointvec_minmax_conv_param {
86425 int *min;
86426 @@ -2214,8 +2272,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
86427 *i = val;
86428 } else {
86429 val = convdiv * (*i) / convmul;
86430 - if (!first)
86431 + if (!first) {
86432 err = proc_put_char(&buffer, &left, '\t');
86433 + if (err)
86434 + break;
86435 + }
86436 err = proc_put_long(&buffer, &left, val, false);
86437 if (err)
86438 break;
86439 @@ -2611,6 +2672,12 @@ int proc_dostring(struct ctl_table *table, int write,
86440 return -ENOSYS;
86441 }
86442
86443 +int proc_dostring_modpriv(struct ctl_table *table, int write,
86444 + void __user *buffer, size_t *lenp, loff_t *ppos)
86445 +{
86446 + return -ENOSYS;
86447 +}
86448 +
86449 int proc_dointvec(struct ctl_table *table, int write,
86450 void __user *buffer, size_t *lenp, loff_t *ppos)
86451 {
86452 @@ -2667,5 +2734,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
86453 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
86454 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
86455 EXPORT_SYMBOL(proc_dostring);
86456 +EXPORT_SYMBOL(proc_dostring_modpriv);
86457 EXPORT_SYMBOL(proc_doulongvec_minmax);
86458 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
86459 diff --git a/kernel/taskstats.c b/kernel/taskstats.c
86460 index 145bb4d..b2aa969 100644
86461 --- a/kernel/taskstats.c
86462 +++ b/kernel/taskstats.c
86463 @@ -28,9 +28,12 @@
86464 #include <linux/fs.h>
86465 #include <linux/file.h>
86466 #include <linux/pid_namespace.h>
86467 +#include <linux/grsecurity.h>
86468 #include <net/genetlink.h>
86469 #include <linux/atomic.h>
86470
86471 +extern int gr_is_taskstats_denied(int pid);
86472 +
86473 /*
86474 * Maximum length of a cpumask that can be specified in
86475 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
86476 @@ -570,6 +573,9 @@ err:
86477
86478 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
86479 {
86480 + if (gr_is_taskstats_denied(current->pid))
86481 + return -EACCES;
86482 +
86483 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
86484 return cmd_attr_register_cpumask(info);
86485 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
86486 diff --git a/kernel/time.c b/kernel/time.c
86487 index 7c7964c..2a0d412 100644
86488 --- a/kernel/time.c
86489 +++ b/kernel/time.c
86490 @@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
86491 return error;
86492
86493 if (tz) {
86494 + /* we log in do_settimeofday called below, so don't log twice
86495 + */
86496 + if (!tv)
86497 + gr_log_timechange();
86498 +
86499 sys_tz = *tz;
86500 update_vsyscall_tz();
86501 if (firsttime) {
86502 diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
86503 index 88c9c65..7497ebc 100644
86504 --- a/kernel/time/alarmtimer.c
86505 +++ b/kernel/time/alarmtimer.c
86506 @@ -795,7 +795,7 @@ static int __init alarmtimer_init(void)
86507 struct platform_device *pdev;
86508 int error = 0;
86509 int i;
86510 - struct k_clock alarm_clock = {
86511 + static struct k_clock alarm_clock = {
86512 .clock_getres = alarm_clock_getres,
86513 .clock_get = alarm_clock_get,
86514 .timer_create = alarm_timer_create,
86515 diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
86516 index 5cf6c70..ac341b0 100644
86517 --- a/kernel/time/timekeeping.c
86518 +++ b/kernel/time/timekeeping.c
86519 @@ -15,6 +15,7 @@
86520 #include <linux/init.h>
86521 #include <linux/mm.h>
86522 #include <linux/sched.h>
86523 +#include <linux/grsecurity.h>
86524 #include <linux/syscore_ops.h>
86525 #include <linux/clocksource.h>
86526 #include <linux/jiffies.h>
86527 @@ -500,6 +501,8 @@ int do_settimeofday(const struct timespec *tv)
86528 if (!timespec_valid_strict(tv))
86529 return -EINVAL;
86530
86531 + gr_log_timechange();
86532 +
86533 raw_spin_lock_irqsave(&timekeeper_lock, flags);
86534 write_seqcount_begin(&timekeeper_seq);
86535
86536 diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
86537 index 61ed862..3b52c65 100644
86538 --- a/kernel/time/timer_list.c
86539 +++ b/kernel/time/timer_list.c
86540 @@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
86541
86542 static void print_name_offset(struct seq_file *m, void *sym)
86543 {
86544 +#ifdef CONFIG_GRKERNSEC_HIDESYM
86545 + SEQ_printf(m, "<%p>", NULL);
86546 +#else
86547 char symname[KSYM_NAME_LEN];
86548
86549 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
86550 SEQ_printf(m, "<%pK>", sym);
86551 else
86552 SEQ_printf(m, "%s", symname);
86553 +#endif
86554 }
86555
86556 static void
86557 @@ -119,7 +123,11 @@ next_one:
86558 static void
86559 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
86560 {
86561 +#ifdef CONFIG_GRKERNSEC_HIDESYM
86562 + SEQ_printf(m, " .base: %p\n", NULL);
86563 +#else
86564 SEQ_printf(m, " .base: %pK\n", base);
86565 +#endif
86566 SEQ_printf(m, " .index: %d\n",
86567 base->index);
86568 SEQ_printf(m, " .resolution: %Lu nsecs\n",
86569 @@ -362,7 +370,11 @@ static int __init init_timer_list_procfs(void)
86570 {
86571 struct proc_dir_entry *pe;
86572
86573 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
86574 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
86575 +#else
86576 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
86577 +#endif
86578 if (!pe)
86579 return -ENOMEM;
86580 return 0;
86581 diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
86582 index 0b537f2..40d6c20 100644
86583 --- a/kernel/time/timer_stats.c
86584 +++ b/kernel/time/timer_stats.c
86585 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
86586 static unsigned long nr_entries;
86587 static struct entry entries[MAX_ENTRIES];
86588
86589 -static atomic_t overflow_count;
86590 +static atomic_unchecked_t overflow_count;
86591
86592 /*
86593 * The entries are in a hash-table, for fast lookup:
86594 @@ -140,7 +140,7 @@ static void reset_entries(void)
86595 nr_entries = 0;
86596 memset(entries, 0, sizeof(entries));
86597 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
86598 - atomic_set(&overflow_count, 0);
86599 + atomic_set_unchecked(&overflow_count, 0);
86600 }
86601
86602 static struct entry *alloc_entry(void)
86603 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
86604 if (likely(entry))
86605 entry->count++;
86606 else
86607 - atomic_inc(&overflow_count);
86608 + atomic_inc_unchecked(&overflow_count);
86609
86610 out_unlock:
86611 raw_spin_unlock_irqrestore(lock, flags);
86612 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
86613
86614 static void print_name_offset(struct seq_file *m, unsigned long addr)
86615 {
86616 +#ifdef CONFIG_GRKERNSEC_HIDESYM
86617 + seq_printf(m, "<%p>", NULL);
86618 +#else
86619 char symname[KSYM_NAME_LEN];
86620
86621 if (lookup_symbol_name(addr, symname) < 0)
86622 - seq_printf(m, "<%p>", (void *)addr);
86623 + seq_printf(m, "<%pK>", (void *)addr);
86624 else
86625 seq_printf(m, "%s", symname);
86626 +#endif
86627 }
86628
86629 static int tstats_show(struct seq_file *m, void *v)
86630 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
86631
86632 seq_puts(m, "Timer Stats Version: v0.2\n");
86633 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
86634 - if (atomic_read(&overflow_count))
86635 + if (atomic_read_unchecked(&overflow_count))
86636 seq_printf(m, "Overflow: %d entries\n",
86637 - atomic_read(&overflow_count));
86638 + atomic_read_unchecked(&overflow_count));
86639
86640 for (i = 0; i < nr_entries; i++) {
86641 entry = entries + i;
86642 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
86643 {
86644 struct proc_dir_entry *pe;
86645
86646 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
86647 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
86648 +#else
86649 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
86650 +#endif
86651 if (!pe)
86652 return -ENOMEM;
86653 return 0;
86654 diff --git a/kernel/timer.c b/kernel/timer.c
86655 index 4296d13..0164b04 100644
86656 --- a/kernel/timer.c
86657 +++ b/kernel/timer.c
86658 @@ -1366,7 +1366,7 @@ void update_process_times(int user_tick)
86659 /*
86660 * This function runs timers and the timer-tq in bottom half context.
86661 */
86662 -static void run_timer_softirq(struct softirq_action *h)
86663 +static __latent_entropy void run_timer_softirq(void)
86664 {
86665 struct tvec_base *base = __this_cpu_read(tvec_bases);
86666
86667 @@ -1429,7 +1429,7 @@ static void process_timeout(unsigned long __data)
86668 *
86669 * In all cases the return value is guaranteed to be non-negative.
86670 */
86671 -signed long __sched schedule_timeout(signed long timeout)
86672 +signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
86673 {
86674 struct timer_list timer;
86675 unsigned long expire;
86676 diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
86677 index b8b8560..75b1a09 100644
86678 --- a/kernel/trace/blktrace.c
86679 +++ b/kernel/trace/blktrace.c
86680 @@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
86681 struct blk_trace *bt = filp->private_data;
86682 char buf[16];
86683
86684 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
86685 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
86686
86687 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
86688 }
86689 @@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
86690 return 1;
86691
86692 bt = buf->chan->private_data;
86693 - atomic_inc(&bt->dropped);
86694 + atomic_inc_unchecked(&bt->dropped);
86695 return 0;
86696 }
86697
86698 @@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
86699
86700 bt->dir = dir;
86701 bt->dev = dev;
86702 - atomic_set(&bt->dropped, 0);
86703 + atomic_set_unchecked(&bt->dropped, 0);
86704
86705 ret = -EIO;
86706 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
86707 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
86708 index f3bd09ee..9bb9586 100644
86709 --- a/kernel/trace/ftrace.c
86710 +++ b/kernel/trace/ftrace.c
86711 @@ -1944,12 +1944,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
86712 if (unlikely(ftrace_disabled))
86713 return 0;
86714
86715 + ret = ftrace_arch_code_modify_prepare();
86716 + FTRACE_WARN_ON(ret);
86717 + if (ret)
86718 + return 0;
86719 +
86720 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
86721 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
86722 if (ret) {
86723 ftrace_bug(ret, ip);
86724 - return 0;
86725 }
86726 - return 1;
86727 + return ret ? 0 : 1;
86728 }
86729
86730 /*
86731 @@ -4043,8 +4048,10 @@ static int ftrace_process_locs(struct module *mod,
86732 if (!count)
86733 return 0;
86734
86735 + pax_open_kernel();
86736 sort(start, count, sizeof(*start),
86737 ftrace_cmp_ips, ftrace_swap_ips);
86738 + pax_close_kernel();
86739
86740 start_pg = ftrace_allocate_pages(count);
86741 if (!start_pg)
86742 @@ -4766,8 +4773,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
86743 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
86744
86745 static int ftrace_graph_active;
86746 -static struct notifier_block ftrace_suspend_notifier;
86747 -
86748 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
86749 {
86750 return 0;
86751 @@ -4918,6 +4923,10 @@ static struct ftrace_ops fgraph_ops __read_mostly = {
86752 FTRACE_OPS_FL_RECURSION_SAFE,
86753 };
86754
86755 +static struct notifier_block ftrace_suspend_notifier = {
86756 + .notifier_call = ftrace_suspend_notifier_call
86757 +};
86758 +
86759 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
86760 trace_func_graph_ent_t entryfunc)
86761 {
86762 @@ -4931,7 +4940,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
86763 goto out;
86764 }
86765
86766 - ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
86767 register_pm_notifier(&ftrace_suspend_notifier);
86768
86769 ftrace_graph_active++;
86770 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
86771 index cc2f66f..05edd54 100644
86772 --- a/kernel/trace/ring_buffer.c
86773 +++ b/kernel/trace/ring_buffer.c
86774 @@ -352,9 +352,9 @@ struct buffer_data_page {
86775 */
86776 struct buffer_page {
86777 struct list_head list; /* list of buffer pages */
86778 - local_t write; /* index for next write */
86779 + local_unchecked_t write; /* index for next write */
86780 unsigned read; /* index for next read */
86781 - local_t entries; /* entries on this page */
86782 + local_unchecked_t entries; /* entries on this page */
86783 unsigned long real_end; /* real end of data */
86784 struct buffer_data_page *page; /* Actual data page */
86785 };
86786 @@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
86787 unsigned long last_overrun;
86788 local_t entries_bytes;
86789 local_t entries;
86790 - local_t overrun;
86791 - local_t commit_overrun;
86792 + local_unchecked_t overrun;
86793 + local_unchecked_t commit_overrun;
86794 local_t dropped_events;
86795 local_t committing;
86796 local_t commits;
86797 @@ -992,8 +992,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
86798 *
86799 * We add a counter to the write field to denote this.
86800 */
86801 - old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
86802 - old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
86803 + old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
86804 + old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
86805
86806 /*
86807 * Just make sure we have seen our old_write and synchronize
86808 @@ -1021,8 +1021,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
86809 * cmpxchg to only update if an interrupt did not already
86810 * do it for us. If the cmpxchg fails, we don't care.
86811 */
86812 - (void)local_cmpxchg(&next_page->write, old_write, val);
86813 - (void)local_cmpxchg(&next_page->entries, old_entries, eval);
86814 + (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
86815 + (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
86816
86817 /*
86818 * No need to worry about races with clearing out the commit.
86819 @@ -1386,12 +1386,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
86820
86821 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
86822 {
86823 - return local_read(&bpage->entries) & RB_WRITE_MASK;
86824 + return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
86825 }
86826
86827 static inline unsigned long rb_page_write(struct buffer_page *bpage)
86828 {
86829 - return local_read(&bpage->write) & RB_WRITE_MASK;
86830 + return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
86831 }
86832
86833 static int
86834 @@ -1486,7 +1486,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
86835 * bytes consumed in ring buffer from here.
86836 * Increment overrun to account for the lost events.
86837 */
86838 - local_add(page_entries, &cpu_buffer->overrun);
86839 + local_add_unchecked(page_entries, &cpu_buffer->overrun);
86840 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
86841 }
86842
86843 @@ -2064,7 +2064,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
86844 * it is our responsibility to update
86845 * the counters.
86846 */
86847 - local_add(entries, &cpu_buffer->overrun);
86848 + local_add_unchecked(entries, &cpu_buffer->overrun);
86849 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
86850
86851 /*
86852 @@ -2214,7 +2214,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
86853 if (tail == BUF_PAGE_SIZE)
86854 tail_page->real_end = 0;
86855
86856 - local_sub(length, &tail_page->write);
86857 + local_sub_unchecked(length, &tail_page->write);
86858 return;
86859 }
86860
86861 @@ -2249,7 +2249,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
86862 rb_event_set_padding(event);
86863
86864 /* Set the write back to the previous setting */
86865 - local_sub(length, &tail_page->write);
86866 + local_sub_unchecked(length, &tail_page->write);
86867 return;
86868 }
86869
86870 @@ -2261,7 +2261,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
86871
86872 /* Set write to end of buffer */
86873 length = (tail + length) - BUF_PAGE_SIZE;
86874 - local_sub(length, &tail_page->write);
86875 + local_sub_unchecked(length, &tail_page->write);
86876 }
86877
86878 /*
86879 @@ -2287,7 +2287,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
86880 * about it.
86881 */
86882 if (unlikely(next_page == commit_page)) {
86883 - local_inc(&cpu_buffer->commit_overrun);
86884 + local_inc_unchecked(&cpu_buffer->commit_overrun);
86885 goto out_reset;
86886 }
86887
86888 @@ -2343,7 +2343,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
86889 cpu_buffer->tail_page) &&
86890 (cpu_buffer->commit_page ==
86891 cpu_buffer->reader_page))) {
86892 - local_inc(&cpu_buffer->commit_overrun);
86893 + local_inc_unchecked(&cpu_buffer->commit_overrun);
86894 goto out_reset;
86895 }
86896 }
86897 @@ -2391,7 +2391,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
86898 length += RB_LEN_TIME_EXTEND;
86899
86900 tail_page = cpu_buffer->tail_page;
86901 - write = local_add_return(length, &tail_page->write);
86902 + write = local_add_return_unchecked(length, &tail_page->write);
86903
86904 /* set write to only the index of the write */
86905 write &= RB_WRITE_MASK;
86906 @@ -2408,7 +2408,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
86907 kmemcheck_annotate_bitfield(event, bitfield);
86908 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
86909
86910 - local_inc(&tail_page->entries);
86911 + local_inc_unchecked(&tail_page->entries);
86912
86913 /*
86914 * If this is the first commit on the page, then update
86915 @@ -2441,7 +2441,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
86916
86917 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
86918 unsigned long write_mask =
86919 - local_read(&bpage->write) & ~RB_WRITE_MASK;
86920 + local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
86921 unsigned long event_length = rb_event_length(event);
86922 /*
86923 * This is on the tail page. It is possible that
86924 @@ -2451,7 +2451,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
86925 */
86926 old_index += write_mask;
86927 new_index += write_mask;
86928 - index = local_cmpxchg(&bpage->write, old_index, new_index);
86929 + index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
86930 if (index == old_index) {
86931 /* update counters */
86932 local_sub(event_length, &cpu_buffer->entries_bytes);
86933 @@ -2843,7 +2843,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
86934
86935 /* Do the likely case first */
86936 if (likely(bpage->page == (void *)addr)) {
86937 - local_dec(&bpage->entries);
86938 + local_dec_unchecked(&bpage->entries);
86939 return;
86940 }
86941
86942 @@ -2855,7 +2855,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
86943 start = bpage;
86944 do {
86945 if (bpage->page == (void *)addr) {
86946 - local_dec(&bpage->entries);
86947 + local_dec_unchecked(&bpage->entries);
86948 return;
86949 }
86950 rb_inc_page(cpu_buffer, &bpage);
86951 @@ -3139,7 +3139,7 @@ static inline unsigned long
86952 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
86953 {
86954 return local_read(&cpu_buffer->entries) -
86955 - (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
86956 + (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
86957 }
86958
86959 /**
86960 @@ -3228,7 +3228,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
86961 return 0;
86962
86963 cpu_buffer = buffer->buffers[cpu];
86964 - ret = local_read(&cpu_buffer->overrun);
86965 + ret = local_read_unchecked(&cpu_buffer->overrun);
86966
86967 return ret;
86968 }
86969 @@ -3251,7 +3251,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
86970 return 0;
86971
86972 cpu_buffer = buffer->buffers[cpu];
86973 - ret = local_read(&cpu_buffer->commit_overrun);
86974 + ret = local_read_unchecked(&cpu_buffer->commit_overrun);
86975
86976 return ret;
86977 }
86978 @@ -3336,7 +3336,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
86979 /* if you care about this being correct, lock the buffer */
86980 for_each_buffer_cpu(buffer, cpu) {
86981 cpu_buffer = buffer->buffers[cpu];
86982 - overruns += local_read(&cpu_buffer->overrun);
86983 + overruns += local_read_unchecked(&cpu_buffer->overrun);
86984 }
86985
86986 return overruns;
86987 @@ -3512,8 +3512,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
86988 /*
86989 * Reset the reader page to size zero.
86990 */
86991 - local_set(&cpu_buffer->reader_page->write, 0);
86992 - local_set(&cpu_buffer->reader_page->entries, 0);
86993 + local_set_unchecked(&cpu_buffer->reader_page->write, 0);
86994 + local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
86995 local_set(&cpu_buffer->reader_page->page->commit, 0);
86996 cpu_buffer->reader_page->real_end = 0;
86997
86998 @@ -3547,7 +3547,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
86999 * want to compare with the last_overrun.
87000 */
87001 smp_mb();
87002 - overwrite = local_read(&(cpu_buffer->overrun));
87003 + overwrite = local_read_unchecked(&(cpu_buffer->overrun));
87004
87005 /*
87006 * Here's the tricky part.
87007 @@ -4117,8 +4117,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
87008
87009 cpu_buffer->head_page
87010 = list_entry(cpu_buffer->pages, struct buffer_page, list);
87011 - local_set(&cpu_buffer->head_page->write, 0);
87012 - local_set(&cpu_buffer->head_page->entries, 0);
87013 + local_set_unchecked(&cpu_buffer->head_page->write, 0);
87014 + local_set_unchecked(&cpu_buffer->head_page->entries, 0);
87015 local_set(&cpu_buffer->head_page->page->commit, 0);
87016
87017 cpu_buffer->head_page->read = 0;
87018 @@ -4128,14 +4128,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
87019
87020 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
87021 INIT_LIST_HEAD(&cpu_buffer->new_pages);
87022 - local_set(&cpu_buffer->reader_page->write, 0);
87023 - local_set(&cpu_buffer->reader_page->entries, 0);
87024 + local_set_unchecked(&cpu_buffer->reader_page->write, 0);
87025 + local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
87026 local_set(&cpu_buffer->reader_page->page->commit, 0);
87027 cpu_buffer->reader_page->read = 0;
87028
87029 local_set(&cpu_buffer->entries_bytes, 0);
87030 - local_set(&cpu_buffer->overrun, 0);
87031 - local_set(&cpu_buffer->commit_overrun, 0);
87032 + local_set_unchecked(&cpu_buffer->overrun, 0);
87033 + local_set_unchecked(&cpu_buffer->commit_overrun, 0);
87034 local_set(&cpu_buffer->dropped_events, 0);
87035 local_set(&cpu_buffer->entries, 0);
87036 local_set(&cpu_buffer->committing, 0);
87037 @@ -4540,8 +4540,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
87038 rb_init_page(bpage);
87039 bpage = reader->page;
87040 reader->page = *data_page;
87041 - local_set(&reader->write, 0);
87042 - local_set(&reader->entries, 0);
87043 + local_set_unchecked(&reader->write, 0);
87044 + local_set_unchecked(&reader->entries, 0);
87045 reader->read = 0;
87046 *data_page = bpage;
87047
87048 diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
87049 index b778e96..4e84621 100644
87050 --- a/kernel/trace/trace.c
87051 +++ b/kernel/trace/trace.c
87052 @@ -3335,7 +3335,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
87053 return 0;
87054 }
87055
87056 -int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
87057 +int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
87058 {
87059 /* do nothing if flag is already set */
87060 if (!!(trace_flags & mask) == !!enabled)
87061 diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
87062 index 10c86fb..645ab0a 100644
87063 --- a/kernel/trace/trace.h
87064 +++ b/kernel/trace/trace.h
87065 @@ -1029,7 +1029,7 @@ extern const char *__stop___tracepoint_str[];
87066 void trace_printk_init_buffers(void);
87067 void trace_printk_start_comm(void);
87068 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
87069 -int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
87070 +int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
87071
87072 /*
87073 * Normal trace_printk() and friends allocates special buffers
87074 diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
87075 index 26dc348..8708ca7 100644
87076 --- a/kernel/trace/trace_clock.c
87077 +++ b/kernel/trace/trace_clock.c
87078 @@ -123,7 +123,7 @@ u64 notrace trace_clock_global(void)
87079 return now;
87080 }
87081
87082 -static atomic64_t trace_counter;
87083 +static atomic64_unchecked_t trace_counter;
87084
87085 /*
87086 * trace_clock_counter(): simply an atomic counter.
87087 @@ -132,5 +132,5 @@ static atomic64_t trace_counter;
87088 */
87089 u64 notrace trace_clock_counter(void)
87090 {
87091 - return atomic64_add_return(1, &trace_counter);
87092 + return atomic64_inc_return_unchecked(&trace_counter);
87093 }
87094 diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
87095 index 368a4d5..7b6d1d5 100644
87096 --- a/kernel/trace/trace_events.c
87097 +++ b/kernel/trace/trace_events.c
87098 @@ -1673,7 +1673,6 @@ __trace_early_add_new_event(struct ftrace_event_call *call,
87099 return 0;
87100 }
87101
87102 -struct ftrace_module_file_ops;
87103 static void __add_event_to_tracers(struct ftrace_event_call *call);
87104
87105 /* Add an additional event_call dynamically */
87106 diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
87107 index b3dcfb2..ebee344 100644
87108 --- a/kernel/trace/trace_mmiotrace.c
87109 +++ b/kernel/trace/trace_mmiotrace.c
87110 @@ -24,7 +24,7 @@ struct header_iter {
87111 static struct trace_array *mmio_trace_array;
87112 static bool overrun_detected;
87113 static unsigned long prev_overruns;
87114 -static atomic_t dropped_count;
87115 +static atomic_unchecked_t dropped_count;
87116
87117 static void mmio_reset_data(struct trace_array *tr)
87118 {
87119 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
87120
87121 static unsigned long count_overruns(struct trace_iterator *iter)
87122 {
87123 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
87124 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
87125 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
87126
87127 if (over > prev_overruns)
87128 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
87129 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
87130 sizeof(*entry), 0, pc);
87131 if (!event) {
87132 - atomic_inc(&dropped_count);
87133 + atomic_inc_unchecked(&dropped_count);
87134 return;
87135 }
87136 entry = ring_buffer_event_data(event);
87137 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
87138 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
87139 sizeof(*entry), 0, pc);
87140 if (!event) {
87141 - atomic_inc(&dropped_count);
87142 + atomic_inc_unchecked(&dropped_count);
87143 return;
87144 }
87145 entry = ring_buffer_event_data(event);
87146 diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
87147 index 34e7cba..6f9a729 100644
87148 --- a/kernel/trace/trace_output.c
87149 +++ b/kernel/trace/trace_output.c
87150 @@ -294,7 +294,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
87151
87152 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
87153 if (!IS_ERR(p)) {
87154 - p = mangle_path(s->buffer + s->len, p, "\n");
87155 + p = mangle_path(s->buffer + s->len, p, "\n\\");
87156 if (p) {
87157 s->len = p - s->buffer;
87158 return 1;
87159 @@ -893,14 +893,16 @@ int register_ftrace_event(struct trace_event *event)
87160 goto out;
87161 }
87162
87163 + pax_open_kernel();
87164 if (event->funcs->trace == NULL)
87165 - event->funcs->trace = trace_nop_print;
87166 + *(void **)&event->funcs->trace = trace_nop_print;
87167 if (event->funcs->raw == NULL)
87168 - event->funcs->raw = trace_nop_print;
87169 + *(void **)&event->funcs->raw = trace_nop_print;
87170 if (event->funcs->hex == NULL)
87171 - event->funcs->hex = trace_nop_print;
87172 + *(void **)&event->funcs->hex = trace_nop_print;
87173 if (event->funcs->binary == NULL)
87174 - event->funcs->binary = trace_nop_print;
87175 + *(void **)&event->funcs->binary = trace_nop_print;
87176 + pax_close_kernel();
87177
87178 key = event->type & (EVENT_HASHSIZE - 1);
87179
87180 diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
87181 index b20428c..4845a10 100644
87182 --- a/kernel/trace/trace_stack.c
87183 +++ b/kernel/trace/trace_stack.c
87184 @@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack)
87185 return;
87186
87187 /* we do not handle interrupt stacks yet */
87188 - if (!object_is_on_stack(stack))
87189 + if (!object_starts_on_stack(stack))
87190 return;
87191
87192 local_irq_save(flags);
87193 diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
87194 index 13fb113..a6ced3f 100644
87195 --- a/kernel/user_namespace.c
87196 +++ b/kernel/user_namespace.c
87197 @@ -82,6 +82,21 @@ int create_user_ns(struct cred *new)
87198 !kgid_has_mapping(parent_ns, group))
87199 return -EPERM;
87200
87201 +#ifdef CONFIG_GRKERNSEC
87202 + /*
87203 + * This doesn't really inspire confidence:
87204 + * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
87205 + * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
87206 + * Increases kernel attack surface in areas developers
87207 + * previously cared little about ("low importance due
87208 + * to requiring "root" capability")
87209 + * To be removed when this code receives *proper* review
87210 + */
87211 + if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
87212 + !capable(CAP_SETGID))
87213 + return -EPERM;
87214 +#endif
87215 +
87216 ns = kmem_cache_zalloc(user_ns_cachep, GFP_KERNEL);
87217 if (!ns)
87218 return -ENOMEM;
87219 @@ -860,7 +875,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
87220 if (atomic_read(&current->mm->mm_users) > 1)
87221 return -EINVAL;
87222
87223 - if (current->fs->users != 1)
87224 + if (atomic_read(&current->fs->users) != 1)
87225 return -EINVAL;
87226
87227 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
87228 diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
87229 index 4f69f9a..7c6f8f8 100644
87230 --- a/kernel/utsname_sysctl.c
87231 +++ b/kernel/utsname_sysctl.c
87232 @@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which)
87233 static int proc_do_uts_string(ctl_table *table, int write,
87234 void __user *buffer, size_t *lenp, loff_t *ppos)
87235 {
87236 - struct ctl_table uts_table;
87237 + ctl_table_no_const uts_table;
87238 int r;
87239 memcpy(&uts_table, table, sizeof(uts_table));
87240 uts_table.data = get_uts(table, write);
87241 diff --git a/kernel/watchdog.c b/kernel/watchdog.c
87242 index 4431610..4265616 100644
87243 --- a/kernel/watchdog.c
87244 +++ b/kernel/watchdog.c
87245 @@ -475,7 +475,7 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
87246 static void watchdog_nmi_disable(unsigned int cpu) { return; }
87247 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
87248
87249 -static struct smp_hotplug_thread watchdog_threads = {
87250 +static struct smp_hotplug_thread watchdog_threads __read_only = {
87251 .store = &softlockup_watchdog,
87252 .thread_should_run = watchdog_should_run,
87253 .thread_fn = watchdog,
87254 diff --git a/kernel/workqueue.c b/kernel/workqueue.c
87255 index 93c2652..66a1cfd 100644
87256 --- a/kernel/workqueue.c
87257 +++ b/kernel/workqueue.c
87258 @@ -4668,7 +4668,7 @@ static void rebind_workers(struct worker_pool *pool)
87259 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
87260 worker_flags |= WORKER_REBOUND;
87261 worker_flags &= ~WORKER_UNBOUND;
87262 - ACCESS_ONCE(worker->flags) = worker_flags;
87263 + ACCESS_ONCE_RW(worker->flags) = worker_flags;
87264 }
87265
87266 spin_unlock_irq(&pool->lock);
87267 diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
87268 index 094f315..244a824 100644
87269 --- a/lib/Kconfig.debug
87270 +++ b/lib/Kconfig.debug
87271 @@ -836,7 +836,7 @@ config DEBUG_MUTEXES
87272
87273 config DEBUG_WW_MUTEX_SLOWPATH
87274 bool "Wait/wound mutex debugging: Slowpath testing"
87275 - depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
87276 + depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
87277 select DEBUG_LOCK_ALLOC
87278 select DEBUG_SPINLOCK
87279 select DEBUG_MUTEXES
87280 @@ -849,7 +849,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
87281
87282 config DEBUG_LOCK_ALLOC
87283 bool "Lock debugging: detect incorrect freeing of live locks"
87284 - depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
87285 + depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
87286 select DEBUG_SPINLOCK
87287 select DEBUG_MUTEXES
87288 select LOCKDEP
87289 @@ -863,7 +863,7 @@ config DEBUG_LOCK_ALLOC
87290
87291 config PROVE_LOCKING
87292 bool "Lock debugging: prove locking correctness"
87293 - depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
87294 + depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
87295 select LOCKDEP
87296 select DEBUG_SPINLOCK
87297 select DEBUG_MUTEXES
87298 @@ -914,7 +914,7 @@ config LOCKDEP
87299
87300 config LOCK_STAT
87301 bool "Lock usage statistics"
87302 - depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
87303 + depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
87304 select LOCKDEP
87305 select DEBUG_SPINLOCK
87306 select DEBUG_MUTEXES
87307 @@ -1376,6 +1376,7 @@ config LATENCYTOP
87308 depends on DEBUG_KERNEL
87309 depends on STACKTRACE_SUPPORT
87310 depends on PROC_FS
87311 + depends on !GRKERNSEC_HIDESYM
87312 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
87313 select KALLSYMS
87314 select KALLSYMS_ALL
87315 @@ -1392,7 +1393,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
87316 config DEBUG_STRICT_USER_COPY_CHECKS
87317 bool "Strict user copy size checks"
87318 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
87319 - depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
87320 + depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
87321 help
87322 Enabling this option turns a certain set of sanity checks for user
87323 copy operations into compile time failures.
87324 @@ -1502,7 +1503,7 @@ endmenu # runtime tests
87325
87326 config PROVIDE_OHCI1394_DMA_INIT
87327 bool "Remote debugging over FireWire early on boot"
87328 - depends on PCI && X86
87329 + depends on PCI && X86 && !GRKERNSEC
87330 help
87331 If you want to debug problems which hang or crash the kernel early
87332 on boot and the crashing machine has a FireWire port, you can use
87333 @@ -1531,7 +1532,7 @@ config PROVIDE_OHCI1394_DMA_INIT
87334
87335 config FIREWIRE_OHCI_REMOTE_DMA
87336 bool "Remote debugging over FireWire with firewire-ohci"
87337 - depends on FIREWIRE_OHCI
87338 + depends on FIREWIRE_OHCI && !GRKERNSEC
87339 help
87340 This option lets you use the FireWire bus for remote debugging
87341 with help of the firewire-ohci driver. It enables unfiltered
87342 diff --git a/lib/Makefile b/lib/Makefile
87343 index f3bb2cb..b358ace 100644
87344 --- a/lib/Makefile
87345 +++ b/lib/Makefile
87346 @@ -52,7 +52,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
87347
87348 obj-$(CONFIG_BTREE) += btree.o
87349 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
87350 -obj-$(CONFIG_DEBUG_LIST) += list_debug.o
87351 +obj-y += list_debug.o
87352 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
87353
87354 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
87355 diff --git a/lib/bitmap.c b/lib/bitmap.c
87356 index 06f7e4f..f3cf2b0 100644
87357 --- a/lib/bitmap.c
87358 +++ b/lib/bitmap.c
87359 @@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
87360 {
87361 int c, old_c, totaldigits, ndigits, nchunks, nbits;
87362 u32 chunk;
87363 - const char __user __force *ubuf = (const char __user __force *)buf;
87364 + const char __user *ubuf = (const char __force_user *)buf;
87365
87366 bitmap_zero(maskp, nmaskbits);
87367
87368 @@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
87369 {
87370 if (!access_ok(VERIFY_READ, ubuf, ulen))
87371 return -EFAULT;
87372 - return __bitmap_parse((const char __force *)ubuf,
87373 + return __bitmap_parse((const char __force_kernel *)ubuf,
87374 ulen, 1, maskp, nmaskbits);
87375
87376 }
87377 @@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
87378 {
87379 unsigned a, b;
87380 int c, old_c, totaldigits;
87381 - const char __user __force *ubuf = (const char __user __force *)buf;
87382 + const char __user *ubuf = (const char __force_user *)buf;
87383 int exp_digit, in_range;
87384
87385 totaldigits = c = 0;
87386 @@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
87387 {
87388 if (!access_ok(VERIFY_READ, ubuf, ulen))
87389 return -EFAULT;
87390 - return __bitmap_parselist((const char __force *)ubuf,
87391 + return __bitmap_parselist((const char __force_kernel *)ubuf,
87392 ulen, 1, maskp, nmaskbits);
87393 }
87394 EXPORT_SYMBOL(bitmap_parselist_user);
87395 diff --git a/lib/bug.c b/lib/bug.c
87396 index 1686034..a9c00c8 100644
87397 --- a/lib/bug.c
87398 +++ b/lib/bug.c
87399 @@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
87400 return BUG_TRAP_TYPE_NONE;
87401
87402 bug = find_bug(bugaddr);
87403 + if (!bug)
87404 + return BUG_TRAP_TYPE_NONE;
87405
87406 file = NULL;
87407 line = 0;
87408 diff --git a/lib/debugobjects.c b/lib/debugobjects.c
87409 index bf2c8b1..1d00ccf 100644
87410 --- a/lib/debugobjects.c
87411 +++ b/lib/debugobjects.c
87412 @@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
87413 if (limit > 4)
87414 return;
87415
87416 - is_on_stack = object_is_on_stack(addr);
87417 + is_on_stack = object_starts_on_stack(addr);
87418 if (is_on_stack == onstack)
87419 return;
87420
87421 diff --git a/lib/devres.c b/lib/devres.c
87422 index 8235331..5881053 100644
87423 --- a/lib/devres.c
87424 +++ b/lib/devres.c
87425 @@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
87426 void devm_iounmap(struct device *dev, void __iomem *addr)
87427 {
87428 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
87429 - (void *)addr));
87430 + (void __force *)addr));
87431 iounmap(addr);
87432 }
87433 EXPORT_SYMBOL(devm_iounmap);
87434 @@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
87435 {
87436 ioport_unmap(addr);
87437 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
87438 - devm_ioport_map_match, (void *)addr));
87439 + devm_ioport_map_match, (void __force *)addr));
87440 }
87441 EXPORT_SYMBOL(devm_ioport_unmap);
87442 #endif /* CONFIG_HAS_IOPORT */
87443 diff --git a/lib/div64.c b/lib/div64.c
87444 index 4382ad7..08aa558 100644
87445 --- a/lib/div64.c
87446 +++ b/lib/div64.c
87447 @@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
87448 EXPORT_SYMBOL(__div64_32);
87449
87450 #ifndef div_s64_rem
87451 -s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
87452 +s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
87453 {
87454 u64 quotient;
87455
87456 @@ -130,7 +130,7 @@ EXPORT_SYMBOL(div64_u64_rem);
87457 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
87458 */
87459 #ifndef div64_u64
87460 -u64 div64_u64(u64 dividend, u64 divisor)
87461 +u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
87462 {
87463 u32 high = divisor >> 32;
87464 u64 quot;
87465 diff --git a/lib/dma-debug.c b/lib/dma-debug.c
87466 index d87a17a..ac0d79a 100644
87467 --- a/lib/dma-debug.c
87468 +++ b/lib/dma-debug.c
87469 @@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
87470
87471 void dma_debug_add_bus(struct bus_type *bus)
87472 {
87473 - struct notifier_block *nb;
87474 + notifier_block_no_const *nb;
87475
87476 if (global_disable)
87477 return;
87478 @@ -945,7 +945,7 @@ static void check_unmap(struct dma_debug_entry *ref)
87479
87480 static void check_for_stack(struct device *dev, void *addr)
87481 {
87482 - if (object_is_on_stack(addr))
87483 + if (object_starts_on_stack(addr))
87484 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
87485 "stack [addr=%p]\n", addr);
87486 }
87487 diff --git a/lib/inflate.c b/lib/inflate.c
87488 index 013a761..c28f3fc 100644
87489 --- a/lib/inflate.c
87490 +++ b/lib/inflate.c
87491 @@ -269,7 +269,7 @@ static void free(void *where)
87492 malloc_ptr = free_mem_ptr;
87493 }
87494 #else
87495 -#define malloc(a) kmalloc(a, GFP_KERNEL)
87496 +#define malloc(a) kmalloc((a), GFP_KERNEL)
87497 #define free(a) kfree(a)
87498 #endif
87499
87500 diff --git a/lib/ioremap.c b/lib/ioremap.c
87501 index 0c9216c..863bd89 100644
87502 --- a/lib/ioremap.c
87503 +++ b/lib/ioremap.c
87504 @@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
87505 unsigned long next;
87506
87507 phys_addr -= addr;
87508 - pmd = pmd_alloc(&init_mm, pud, addr);
87509 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
87510 if (!pmd)
87511 return -ENOMEM;
87512 do {
87513 @@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
87514 unsigned long next;
87515
87516 phys_addr -= addr;
87517 - pud = pud_alloc(&init_mm, pgd, addr);
87518 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
87519 if (!pud)
87520 return -ENOMEM;
87521 do {
87522 diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
87523 index bd2bea9..6b3c95e 100644
87524 --- a/lib/is_single_threaded.c
87525 +++ b/lib/is_single_threaded.c
87526 @@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
87527 struct task_struct *p, *t;
87528 bool ret;
87529
87530 + if (!mm)
87531 + return true;
87532 +
87533 if (atomic_read(&task->signal->live) != 1)
87534 return false;
87535
87536 diff --git a/lib/kobject.c b/lib/kobject.c
87537 index 084f7b1..d265b8a 100644
87538 --- a/lib/kobject.c
87539 +++ b/lib/kobject.c
87540 @@ -875,9 +875,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
87541
87542
87543 static DEFINE_SPINLOCK(kobj_ns_type_lock);
87544 -static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
87545 +static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
87546
87547 -int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
87548 +int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
87549 {
87550 enum kobj_ns_type type = ops->type;
87551 int error;
87552 diff --git a/lib/list_debug.c b/lib/list_debug.c
87553 index c24c2f7..f0296f4 100644
87554 --- a/lib/list_debug.c
87555 +++ b/lib/list_debug.c
87556 @@ -11,7 +11,9 @@
87557 #include <linux/bug.h>
87558 #include <linux/kernel.h>
87559 #include <linux/rculist.h>
87560 +#include <linux/mm.h>
87561
87562 +#ifdef CONFIG_DEBUG_LIST
87563 /*
87564 * Insert a new entry between two known consecutive entries.
87565 *
87566 @@ -19,21 +21,40 @@
87567 * the prev/next entries already!
87568 */
87569
87570 +static bool __list_add_debug(struct list_head *new,
87571 + struct list_head *prev,
87572 + struct list_head *next)
87573 +{
87574 + if (unlikely(next->prev != prev)) {
87575 + printk(KERN_ERR "list_add corruption. next->prev should be "
87576 + "prev (%p), but was %p. (next=%p).\n",
87577 + prev, next->prev, next);
87578 + BUG();
87579 + return false;
87580 + }
87581 + if (unlikely(prev->next != next)) {
87582 + printk(KERN_ERR "list_add corruption. prev->next should be "
87583 + "next (%p), but was %p. (prev=%p).\n",
87584 + next, prev->next, prev);
87585 + BUG();
87586 + return false;
87587 + }
87588 + if (unlikely(new == prev || new == next)) {
87589 + printk(KERN_ERR "list_add double add: new=%p, prev=%p, next=%p.\n",
87590 + new, prev, next);
87591 + BUG();
87592 + return false;
87593 + }
87594 + return true;
87595 +}
87596 +
87597 void __list_add(struct list_head *new,
87598 - struct list_head *prev,
87599 - struct list_head *next)
87600 + struct list_head *prev,
87601 + struct list_head *next)
87602 {
87603 - WARN(next->prev != prev,
87604 - "list_add corruption. next->prev should be "
87605 - "prev (%p), but was %p. (next=%p).\n",
87606 - prev, next->prev, next);
87607 - WARN(prev->next != next,
87608 - "list_add corruption. prev->next should be "
87609 - "next (%p), but was %p. (prev=%p).\n",
87610 - next, prev->next, prev);
87611 - WARN(new == prev || new == next,
87612 - "list_add double add: new=%p, prev=%p, next=%p.\n",
87613 - new, prev, next);
87614 + if (!__list_add_debug(new, prev, next))
87615 + return;
87616 +
87617 next->prev = new;
87618 new->next = next;
87619 new->prev = prev;
87620 @@ -41,28 +62,46 @@ void __list_add(struct list_head *new,
87621 }
87622 EXPORT_SYMBOL(__list_add);
87623
87624 -void __list_del_entry(struct list_head *entry)
87625 +static bool __list_del_entry_debug(struct list_head *entry)
87626 {
87627 struct list_head *prev, *next;
87628
87629 prev = entry->prev;
87630 next = entry->next;
87631
87632 - if (WARN(next == LIST_POISON1,
87633 - "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
87634 - entry, LIST_POISON1) ||
87635 - WARN(prev == LIST_POISON2,
87636 - "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
87637 - entry, LIST_POISON2) ||
87638 - WARN(prev->next != entry,
87639 - "list_del corruption. prev->next should be %p, "
87640 - "but was %p\n", entry, prev->next) ||
87641 - WARN(next->prev != entry,
87642 - "list_del corruption. next->prev should be %p, "
87643 - "but was %p\n", entry, next->prev))
87644 + if (unlikely(next == LIST_POISON1)) {
87645 + printk(KERN_ERR "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
87646 + entry, LIST_POISON1);
87647 + BUG();
87648 + return false;
87649 + }
87650 + if (unlikely(prev == LIST_POISON2)) {
87651 + printk(KERN_ERR "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
87652 + entry, LIST_POISON2);
87653 + BUG();
87654 + return false;
87655 + }
87656 + if (unlikely(entry->prev->next != entry)) {
87657 + printk(KERN_ERR "list_del corruption. prev->next should be %p, "
87658 + "but was %p\n", entry, prev->next);
87659 + BUG();
87660 + return false;
87661 + }
87662 + if (unlikely(entry->next->prev != entry)) {
87663 + printk(KERN_ERR "list_del corruption. next->prev should be %p, "
87664 + "but was %p\n", entry, next->prev);
87665 + BUG();
87666 + return false;
87667 + }
87668 + return true;
87669 +}
87670 +
87671 +void __list_del_entry(struct list_head *entry)
87672 +{
87673 + if (!__list_del_entry_debug(entry))
87674 return;
87675
87676 - __list_del(prev, next);
87677 + __list_del(entry->prev, entry->next);
87678 }
87679 EXPORT_SYMBOL(__list_del_entry);
87680
87681 @@ -86,15 +125,85 @@ EXPORT_SYMBOL(list_del);
87682 void __list_add_rcu(struct list_head *new,
87683 struct list_head *prev, struct list_head *next)
87684 {
87685 - WARN(next->prev != prev,
87686 - "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
87687 - prev, next->prev, next);
87688 - WARN(prev->next != next,
87689 - "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
87690 - next, prev->next, prev);
87691 + if (!__list_add_debug(new, prev, next))
87692 + return;
87693 +
87694 new->next = next;
87695 new->prev = prev;
87696 rcu_assign_pointer(list_next_rcu(prev), new);
87697 next->prev = new;
87698 }
87699 EXPORT_SYMBOL(__list_add_rcu);
87700 +#endif
87701 +
87702 +void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
87703 +{
87704 +#ifdef CONFIG_DEBUG_LIST
87705 + if (!__list_add_debug(new, prev, next))
87706 + return;
87707 +#endif
87708 +
87709 + pax_open_kernel();
87710 + next->prev = new;
87711 + new->next = next;
87712 + new->prev = prev;
87713 + prev->next = new;
87714 + pax_close_kernel();
87715 +}
87716 +EXPORT_SYMBOL(__pax_list_add);
87717 +
87718 +void pax_list_del(struct list_head *entry)
87719 +{
87720 +#ifdef CONFIG_DEBUG_LIST
87721 + if (!__list_del_entry_debug(entry))
87722 + return;
87723 +#endif
87724 +
87725 + pax_open_kernel();
87726 + __list_del(entry->prev, entry->next);
87727 + entry->next = LIST_POISON1;
87728 + entry->prev = LIST_POISON2;
87729 + pax_close_kernel();
87730 +}
87731 +EXPORT_SYMBOL(pax_list_del);
87732 +
87733 +void pax_list_del_init(struct list_head *entry)
87734 +{
87735 + pax_open_kernel();
87736 + __list_del(entry->prev, entry->next);
87737 + INIT_LIST_HEAD(entry);
87738 + pax_close_kernel();
87739 +}
87740 +EXPORT_SYMBOL(pax_list_del_init);
87741 +
87742 +void __pax_list_add_rcu(struct list_head *new,
87743 + struct list_head *prev, struct list_head *next)
87744 +{
87745 +#ifdef CONFIG_DEBUG_LIST
87746 + if (!__list_add_debug(new, prev, next))
87747 + return;
87748 +#endif
87749 +
87750 + pax_open_kernel();
87751 + new->next = next;
87752 + new->prev = prev;
87753 + rcu_assign_pointer(list_next_rcu(prev), new);
87754 + next->prev = new;
87755 + pax_close_kernel();
87756 +}
87757 +EXPORT_SYMBOL(__pax_list_add_rcu);
87758 +
87759 +void pax_list_del_rcu(struct list_head *entry)
87760 +{
87761 +#ifdef CONFIG_DEBUG_LIST
87762 + if (!__list_del_entry_debug(entry))
87763 + return;
87764 +#endif
87765 +
87766 + pax_open_kernel();
87767 + __list_del(entry->prev, entry->next);
87768 + entry->next = LIST_POISON1;
87769 + entry->prev = LIST_POISON2;
87770 + pax_close_kernel();
87771 +}
87772 +EXPORT_SYMBOL(pax_list_del_rcu);
87773 diff --git a/lib/percpu-refcount.c b/lib/percpu-refcount.c
87774 index 1a53d49..ace934c 100644
87775 --- a/lib/percpu-refcount.c
87776 +++ b/lib/percpu-refcount.c
87777 @@ -29,7 +29,7 @@
87778 * can't hit 0 before we've added up all the percpu refs.
87779 */
87780
87781 -#define PCPU_COUNT_BIAS (1U << 31)
87782 +#define PCPU_COUNT_BIAS (1U << 30)
87783
87784 /**
87785 * percpu_ref_init - initialize a percpu refcount
87786 diff --git a/lib/radix-tree.c b/lib/radix-tree.c
87787 index 7811ed3..f80ca19 100644
87788 --- a/lib/radix-tree.c
87789 +++ b/lib/radix-tree.c
87790 @@ -93,7 +93,7 @@ struct radix_tree_preload {
87791 int nr;
87792 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
87793 };
87794 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
87795 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
87796
87797 static inline void *ptr_to_indirect(void *ptr)
87798 {
87799 diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
87800 index bb2b201..46abaf9 100644
87801 --- a/lib/strncpy_from_user.c
87802 +++ b/lib/strncpy_from_user.c
87803 @@ -21,7 +21,7 @@
87804 */
87805 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
87806 {
87807 - const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
87808 + static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
87809 long res = 0;
87810
87811 /*
87812 diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
87813 index a28df52..3d55877 100644
87814 --- a/lib/strnlen_user.c
87815 +++ b/lib/strnlen_user.c
87816 @@ -26,7 +26,7 @@
87817 */
87818 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
87819 {
87820 - const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
87821 + static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
87822 long align, res = 0;
87823 unsigned long c;
87824
87825 diff --git a/lib/swiotlb.c b/lib/swiotlb.c
87826 index 4e8686c..3e8c92f 100644
87827 --- a/lib/swiotlb.c
87828 +++ b/lib/swiotlb.c
87829 @@ -664,7 +664,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
87830
87831 void
87832 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
87833 - dma_addr_t dev_addr)
87834 + dma_addr_t dev_addr, struct dma_attrs *attrs)
87835 {
87836 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
87837
87838 diff --git a/lib/usercopy.c b/lib/usercopy.c
87839 index 4f5b1dd..7cab418 100644
87840 --- a/lib/usercopy.c
87841 +++ b/lib/usercopy.c
87842 @@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
87843 WARN(1, "Buffer overflow detected!\n");
87844 }
87845 EXPORT_SYMBOL(copy_from_user_overflow);
87846 +
87847 +void copy_to_user_overflow(void)
87848 +{
87849 + WARN(1, "Buffer overflow detected!\n");
87850 +}
87851 +EXPORT_SYMBOL(copy_to_user_overflow);
87852 diff --git a/lib/vsprintf.c b/lib/vsprintf.c
87853 index d76555c..62d4bfe 100644
87854 --- a/lib/vsprintf.c
87855 +++ b/lib/vsprintf.c
87856 @@ -16,6 +16,9 @@
87857 * - scnprintf and vscnprintf
87858 */
87859
87860 +#ifdef CONFIG_GRKERNSEC_HIDESYM
87861 +#define __INCLUDED_BY_HIDESYM 1
87862 +#endif
87863 #include <stdarg.h>
87864 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
87865 #include <linux/types.h>
87866 @@ -1155,7 +1158,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
87867 return number(buf, end, *(const netdev_features_t *)addr, spec);
87868 }
87869
87870 +#ifdef CONFIG_GRKERNSEC_HIDESYM
87871 +int kptr_restrict __read_mostly = 2;
87872 +#else
87873 int kptr_restrict __read_mostly;
87874 +#endif
87875
87876 /*
87877 * Show a '%p' thing. A kernel extension is that the '%p' is followed
87878 @@ -1168,6 +1175,7 @@ int kptr_restrict __read_mostly;
87879 * - 'f' For simple symbolic function names without offset
87880 * - 'S' For symbolic direct pointers with offset
87881 * - 's' For symbolic direct pointers without offset
87882 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
87883 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
87884 * - 'B' For backtraced symbolic direct pointers with offset
87885 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
87886 @@ -1232,12 +1240,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
87887
87888 if (!ptr && *fmt != 'K') {
87889 /*
87890 - * Print (null) with the same width as a pointer so it makes
87891 + * Print (nil) with the same width as a pointer so it makes
87892 * tabular output look nice.
87893 */
87894 if (spec.field_width == -1)
87895 spec.field_width = default_width;
87896 - return string(buf, end, "(null)", spec);
87897 + return string(buf, end, "(nil)", spec);
87898 }
87899
87900 switch (*fmt) {
87901 @@ -1247,6 +1255,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
87902 /* Fallthrough */
87903 case 'S':
87904 case 's':
87905 +#ifdef CONFIG_GRKERNSEC_HIDESYM
87906 + break;
87907 +#else
87908 + return symbol_string(buf, end, ptr, spec, fmt);
87909 +#endif
87910 + case 'A':
87911 case 'B':
87912 return symbol_string(buf, end, ptr, spec, fmt);
87913 case 'R':
87914 @@ -1302,6 +1316,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
87915 va_end(va);
87916 return buf;
87917 }
87918 + case 'P':
87919 + break;
87920 case 'K':
87921 /*
87922 * %pK cannot be used in IRQ context because its test
87923 @@ -1363,6 +1379,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
87924 ((const struct file *)ptr)->f_path.dentry,
87925 spec, fmt);
87926 }
87927 +
87928 +#ifdef CONFIG_GRKERNSEC_HIDESYM
87929 + /* 'P' = approved pointers to copy to userland,
87930 + as in the /proc/kallsyms case, as we make it display nothing
87931 + for non-root users, and the real contents for root users
87932 + Also ignore 'K' pointers, since we force their NULLing for non-root users
87933 + above
87934 + */
87935 + if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
87936 + printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
87937 + dump_stack();
87938 + ptr = NULL;
87939 + }
87940 +#endif
87941 +
87942 spec.flags |= SMALL;
87943 if (spec.field_width == -1) {
87944 spec.field_width = default_width;
87945 @@ -2086,11 +2117,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
87946 typeof(type) value; \
87947 if (sizeof(type) == 8) { \
87948 args = PTR_ALIGN(args, sizeof(u32)); \
87949 - *(u32 *)&value = *(u32 *)args; \
87950 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
87951 + *(u32 *)&value = *(const u32 *)args; \
87952 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
87953 } else { \
87954 args = PTR_ALIGN(args, sizeof(type)); \
87955 - value = *(typeof(type) *)args; \
87956 + value = *(const typeof(type) *)args; \
87957 } \
87958 args += sizeof(type); \
87959 value; \
87960 @@ -2153,7 +2184,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
87961 case FORMAT_TYPE_STR: {
87962 const char *str_arg = args;
87963 args += strlen(str_arg) + 1;
87964 - str = string(str, end, (char *)str_arg, spec);
87965 + str = string(str, end, str_arg, spec);
87966 break;
87967 }
87968
87969 diff --git a/localversion-grsec b/localversion-grsec
87970 new file mode 100644
87971 index 0000000..7cd6065
87972 --- /dev/null
87973 +++ b/localversion-grsec
87974 @@ -0,0 +1 @@
87975 +-grsec
87976 diff --git a/mm/Kconfig b/mm/Kconfig
87977 index 394838f..0e5f816 100644
87978 --- a/mm/Kconfig
87979 +++ b/mm/Kconfig
87980 @@ -317,10 +317,11 @@ config KSM
87981 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
87982
87983 config DEFAULT_MMAP_MIN_ADDR
87984 - int "Low address space to protect from user allocation"
87985 + int "Low address space to protect from user allocation"
87986 depends on MMU
87987 - default 4096
87988 - help
87989 + default 32768 if ALPHA || ARM || PARISC || SPARC32
87990 + default 65536
87991 + help
87992 This is the portion of low virtual memory which should be protected
87993 from userspace allocation. Keeping a user from writing to low pages
87994 can help reduce the impact of kernel NULL pointer bugs.
87995 @@ -351,7 +352,7 @@ config MEMORY_FAILURE
87996
87997 config HWPOISON_INJECT
87998 tristate "HWPoison pages injector"
87999 - depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
88000 + depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
88001 select PROC_PAGE_MONITOR
88002
88003 config NOMMU_INITIAL_TRIM_EXCESS
88004 diff --git a/mm/backing-dev.c b/mm/backing-dev.c
88005 index ce682f7..1fb54f9 100644
88006 --- a/mm/backing-dev.c
88007 +++ b/mm/backing-dev.c
88008 @@ -12,7 +12,7 @@
88009 #include <linux/device.h>
88010 #include <trace/events/writeback.h>
88011
88012 -static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0);
88013 +static atomic_long_unchecked_t bdi_seq = ATOMIC_LONG_INIT(0);
88014
88015 struct backing_dev_info default_backing_dev_info = {
88016 .name = "default",
88017 @@ -525,7 +525,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
88018 return err;
88019
88020 err = bdi_register(bdi, NULL, "%.28s-%ld", name,
88021 - atomic_long_inc_return(&bdi_seq));
88022 + atomic_long_inc_return_unchecked(&bdi_seq));
88023 if (err) {
88024 bdi_destroy(bdi);
88025 return err;
88026 diff --git a/mm/filemap.c b/mm/filemap.c
88027 index ae4846f..b0acebe 100644
88028 --- a/mm/filemap.c
88029 +++ b/mm/filemap.c
88030 @@ -1768,7 +1768,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
88031 struct address_space *mapping = file->f_mapping;
88032
88033 if (!mapping->a_ops->readpage)
88034 - return -ENOEXEC;
88035 + return -ENODEV;
88036 file_accessed(file);
88037 vma->vm_ops = &generic_file_vm_ops;
88038 return 0;
88039 @@ -1950,7 +1950,7 @@ static size_t __iovec_copy_from_user_inatomic(char *vaddr,
88040
88041 while (bytes) {
88042 char __user *buf = iov->iov_base + base;
88043 - int copy = min(bytes, iov->iov_len - base);
88044 + size_t copy = min(bytes, iov->iov_len - base);
88045
88046 base = 0;
88047 left = __copy_from_user_inatomic(vaddr, buf, copy);
88048 @@ -1979,7 +1979,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
88049 BUG_ON(!in_atomic());
88050 kaddr = kmap_atomic(page);
88051 if (likely(i->nr_segs == 1)) {
88052 - int left;
88053 + size_t left;
88054 char __user *buf = i->iov->iov_base + i->iov_offset;
88055 left = __copy_from_user_inatomic(kaddr + offset, buf, bytes);
88056 copied = bytes - left;
88057 @@ -2007,7 +2007,7 @@ size_t iov_iter_copy_from_user(struct page *page,
88058
88059 kaddr = kmap(page);
88060 if (likely(i->nr_segs == 1)) {
88061 - int left;
88062 + size_t left;
88063 char __user *buf = i->iov->iov_base + i->iov_offset;
88064 left = __copy_from_user(kaddr + offset, buf, bytes);
88065 copied = bytes - left;
88066 @@ -2037,7 +2037,7 @@ void iov_iter_advance(struct iov_iter *i, size_t bytes)
88067 * zero-length segments (without overruning the iovec).
88068 */
88069 while (bytes || unlikely(i->count && !iov->iov_len)) {
88070 - int copy;
88071 + size_t copy;
88072
88073 copy = min(bytes, iov->iov_len - base);
88074 BUG_ON(!i->count || i->count < copy);
88075 @@ -2108,6 +2108,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
88076 *pos = i_size_read(inode);
88077
88078 if (limit != RLIM_INFINITY) {
88079 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
88080 if (*pos >= limit) {
88081 send_sig(SIGXFSZ, current, 0);
88082 return -EFBIG;
88083 diff --git a/mm/fremap.c b/mm/fremap.c
88084 index 5bff081..bfa6e93 100644
88085 --- a/mm/fremap.c
88086 +++ b/mm/fremap.c
88087 @@ -163,6 +163,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
88088 retry:
88089 vma = find_vma(mm, start);
88090
88091 +#ifdef CONFIG_PAX_SEGMEXEC
88092 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
88093 + goto out;
88094 +#endif
88095 +
88096 /*
88097 * Make sure the vma is shared, that it supports prefaulting,
88098 * and that the remapped range is valid and fully within
88099 @@ -208,9 +213,10 @@ get_write_lock:
88100 if (mapping_cap_account_dirty(mapping)) {
88101 unsigned long addr;
88102 struct file *file = get_file(vma->vm_file);
88103 + /* mmap_region may free vma; grab the info now */
88104 + vm_flags = ACCESS_ONCE(vma->vm_flags);
88105
88106 - addr = mmap_region(file, start, size,
88107 - vma->vm_flags, pgoff);
88108 + addr = mmap_region(file, start, size, vm_flags, pgoff);
88109 fput(file);
88110 if (IS_ERR_VALUE(addr)) {
88111 err = addr;
88112 @@ -218,7 +224,7 @@ get_write_lock:
88113 BUG_ON(addr != start);
88114 err = 0;
88115 }
88116 - goto out;
88117 + goto out_freed;
88118 }
88119 mutex_lock(&mapping->i_mmap_mutex);
88120 flush_dcache_mmap_lock(mapping);
88121 @@ -253,6 +259,7 @@ get_write_lock:
88122 out:
88123 if (vma)
88124 vm_flags = vma->vm_flags;
88125 +out_freed:
88126 if (likely(!has_write_lock))
88127 up_read(&mm->mmap_sem);
88128 else
88129 diff --git a/mm/highmem.c b/mm/highmem.c
88130 index b32b70c..e512eb0 100644
88131 --- a/mm/highmem.c
88132 +++ b/mm/highmem.c
88133 @@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
88134 * So no dangers, even with speculative execution.
88135 */
88136 page = pte_page(pkmap_page_table[i]);
88137 + pax_open_kernel();
88138 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
88139 -
88140 + pax_close_kernel();
88141 set_page_address(page, NULL);
88142 need_flush = 1;
88143 }
88144 @@ -198,9 +199,11 @@ start:
88145 }
88146 }
88147 vaddr = PKMAP_ADDR(last_pkmap_nr);
88148 +
88149 + pax_open_kernel();
88150 set_pte_at(&init_mm, vaddr,
88151 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
88152 -
88153 + pax_close_kernel();
88154 pkmap_count[last_pkmap_nr] = 1;
88155 set_page_address(page, (void *)vaddr);
88156
88157 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
88158 index 0b7656e..d21cefc 100644
88159 --- a/mm/hugetlb.c
88160 +++ b/mm/hugetlb.c
88161 @@ -2094,15 +2094,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
88162 struct hstate *h = &default_hstate;
88163 unsigned long tmp;
88164 int ret;
88165 + ctl_table_no_const hugetlb_table;
88166
88167 tmp = h->max_huge_pages;
88168
88169 if (write && h->order >= MAX_ORDER)
88170 return -EINVAL;
88171
88172 - table->data = &tmp;
88173 - table->maxlen = sizeof(unsigned long);
88174 - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
88175 + hugetlb_table = *table;
88176 + hugetlb_table.data = &tmp;
88177 + hugetlb_table.maxlen = sizeof(unsigned long);
88178 + ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
88179 if (ret)
88180 goto out;
88181
88182 @@ -2147,15 +2149,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
88183 struct hstate *h = &default_hstate;
88184 unsigned long tmp;
88185 int ret;
88186 + ctl_table_no_const hugetlb_table;
88187
88188 tmp = h->nr_overcommit_huge_pages;
88189
88190 if (write && h->order >= MAX_ORDER)
88191 return -EINVAL;
88192
88193 - table->data = &tmp;
88194 - table->maxlen = sizeof(unsigned long);
88195 - ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
88196 + hugetlb_table = *table;
88197 + hugetlb_table.data = &tmp;
88198 + hugetlb_table.maxlen = sizeof(unsigned long);
88199 + ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
88200 if (ret)
88201 goto out;
88202
88203 @@ -2605,6 +2609,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
88204 return 1;
88205 }
88206
88207 +#ifdef CONFIG_PAX_SEGMEXEC
88208 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
88209 +{
88210 + struct mm_struct *mm = vma->vm_mm;
88211 + struct vm_area_struct *vma_m;
88212 + unsigned long address_m;
88213 + pte_t *ptep_m;
88214 +
88215 + vma_m = pax_find_mirror_vma(vma);
88216 + if (!vma_m)
88217 + return;
88218 +
88219 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
88220 + address_m = address + SEGMEXEC_TASK_SIZE;
88221 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
88222 + get_page(page_m);
88223 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
88224 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
88225 +}
88226 +#endif
88227 +
88228 /*
88229 * Hugetlb_cow() should be called with page lock of the original hugepage held.
88230 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
88231 @@ -2721,6 +2746,11 @@ retry_avoidcopy:
88232 make_huge_pte(vma, new_page, 1));
88233 page_remove_rmap(old_page);
88234 hugepage_add_new_anon_rmap(new_page, vma, address);
88235 +
88236 +#ifdef CONFIG_PAX_SEGMEXEC
88237 + pax_mirror_huge_pte(vma, address, new_page);
88238 +#endif
88239 +
88240 /* Make the old page be freed below */
88241 new_page = old_page;
88242 }
88243 @@ -2883,6 +2913,10 @@ retry:
88244 && (vma->vm_flags & VM_SHARED)));
88245 set_huge_pte_at(mm, address, ptep, new_pte);
88246
88247 +#ifdef CONFIG_PAX_SEGMEXEC
88248 + pax_mirror_huge_pte(vma, address, page);
88249 +#endif
88250 +
88251 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
88252 /* Optimization, do the COW without a second fault */
88253 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
88254 @@ -2912,6 +2946,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
88255 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
88256 struct hstate *h = hstate_vma(vma);
88257
88258 +#ifdef CONFIG_PAX_SEGMEXEC
88259 + struct vm_area_struct *vma_m;
88260 +#endif
88261 +
88262 address &= huge_page_mask(h);
88263
88264 ptep = huge_pte_offset(mm, address);
88265 @@ -2925,6 +2963,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
88266 VM_FAULT_SET_HINDEX(hstate_index(h));
88267 }
88268
88269 +#ifdef CONFIG_PAX_SEGMEXEC
88270 + vma_m = pax_find_mirror_vma(vma);
88271 + if (vma_m) {
88272 + unsigned long address_m;
88273 +
88274 + if (vma->vm_start > vma_m->vm_start) {
88275 + address_m = address;
88276 + address -= SEGMEXEC_TASK_SIZE;
88277 + vma = vma_m;
88278 + h = hstate_vma(vma);
88279 + } else
88280 + address_m = address + SEGMEXEC_TASK_SIZE;
88281 +
88282 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
88283 + return VM_FAULT_OOM;
88284 + address_m &= HPAGE_MASK;
88285 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
88286 + }
88287 +#endif
88288 +
88289 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
88290 if (!ptep)
88291 return VM_FAULT_OOM;
88292 diff --git a/mm/internal.h b/mm/internal.h
88293 index 684f7aa..9eb9edc 100644
88294 --- a/mm/internal.h
88295 +++ b/mm/internal.h
88296 @@ -97,6 +97,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
88297 * in mm/page_alloc.c
88298 */
88299 extern void __free_pages_bootmem(struct page *page, unsigned int order);
88300 +extern void free_compound_page(struct page *page);
88301 extern void prep_compound_page(struct page *page, unsigned long order);
88302 #ifdef CONFIG_MEMORY_FAILURE
88303 extern bool is_free_buddy_page(struct page *page);
88304 @@ -352,7 +353,7 @@ extern u32 hwpoison_filter_enable;
88305
88306 extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
88307 unsigned long, unsigned long,
88308 - unsigned long, unsigned long);
88309 + unsigned long, unsigned long) __intentional_overflow(-1);
88310
88311 extern void set_pageblock_order(void);
88312 unsigned long reclaim_clean_pages_from_list(struct zone *zone,
88313 diff --git a/mm/kmemleak.c b/mm/kmemleak.c
88314 index e126b0e..e986018 100644
88315 --- a/mm/kmemleak.c
88316 +++ b/mm/kmemleak.c
88317 @@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
88318
88319 for (i = 0; i < object->trace_len; i++) {
88320 void *ptr = (void *)object->trace[i];
88321 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
88322 + seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
88323 }
88324 }
88325
88326 @@ -1851,7 +1851,7 @@ static int __init kmemleak_late_init(void)
88327 return -ENOMEM;
88328 }
88329
88330 - dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
88331 + dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
88332 &kmemleak_fops);
88333 if (!dentry)
88334 pr_warning("Failed to create the debugfs kmemleak file\n");
88335 diff --git a/mm/maccess.c b/mm/maccess.c
88336 index d53adf9..03a24bf 100644
88337 --- a/mm/maccess.c
88338 +++ b/mm/maccess.c
88339 @@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
88340 set_fs(KERNEL_DS);
88341 pagefault_disable();
88342 ret = __copy_from_user_inatomic(dst,
88343 - (__force const void __user *)src, size);
88344 + (const void __force_user *)src, size);
88345 pagefault_enable();
88346 set_fs(old_fs);
88347
88348 @@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
88349
88350 set_fs(KERNEL_DS);
88351 pagefault_disable();
88352 - ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
88353 + ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
88354 pagefault_enable();
88355 set_fs(old_fs);
88356
88357 diff --git a/mm/madvise.c b/mm/madvise.c
88358 index 539eeb9..e24a987 100644
88359 --- a/mm/madvise.c
88360 +++ b/mm/madvise.c
88361 @@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct *vma,
88362 pgoff_t pgoff;
88363 unsigned long new_flags = vma->vm_flags;
88364
88365 +#ifdef CONFIG_PAX_SEGMEXEC
88366 + struct vm_area_struct *vma_m;
88367 +#endif
88368 +
88369 switch (behavior) {
88370 case MADV_NORMAL:
88371 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
88372 @@ -126,6 +130,13 @@ success:
88373 /*
88374 * vm_flags is protected by the mmap_sem held in write mode.
88375 */
88376 +
88377 +#ifdef CONFIG_PAX_SEGMEXEC
88378 + vma_m = pax_find_mirror_vma(vma);
88379 + if (vma_m)
88380 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
88381 +#endif
88382 +
88383 vma->vm_flags = new_flags;
88384
88385 out:
88386 @@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct *vma,
88387 struct vm_area_struct **prev,
88388 unsigned long start, unsigned long end)
88389 {
88390 +
88391 +#ifdef CONFIG_PAX_SEGMEXEC
88392 + struct vm_area_struct *vma_m;
88393 +#endif
88394 +
88395 *prev = vma;
88396 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
88397 return -EINVAL;
88398 @@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct *vma,
88399 zap_page_range(vma, start, end - start, &details);
88400 } else
88401 zap_page_range(vma, start, end - start, NULL);
88402 +
88403 +#ifdef CONFIG_PAX_SEGMEXEC
88404 + vma_m = pax_find_mirror_vma(vma);
88405 + if (vma_m) {
88406 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
88407 + struct zap_details details = {
88408 + .nonlinear_vma = vma_m,
88409 + .last_index = ULONG_MAX,
88410 + };
88411 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
88412 + } else
88413 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
88414 + }
88415 +#endif
88416 +
88417 return 0;
88418 }
88419
88420 @@ -491,6 +522,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
88421 if (end < start)
88422 return error;
88423
88424 +#ifdef CONFIG_PAX_SEGMEXEC
88425 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
88426 + if (end > SEGMEXEC_TASK_SIZE)
88427 + return error;
88428 + } else
88429 +#endif
88430 +
88431 + if (end > TASK_SIZE)
88432 + return error;
88433 +
88434 error = 0;
88435 if (end == start)
88436 return error;
88437 diff --git a/mm/memory-failure.c b/mm/memory-failure.c
88438 index bf3351b..aea800d 100644
88439 --- a/mm/memory-failure.c
88440 +++ b/mm/memory-failure.c
88441 @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
88442
88443 int sysctl_memory_failure_recovery __read_mostly = 1;
88444
88445 -atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
88446 +atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
88447
88448 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
88449
88450 @@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
88451 pfn, t->comm, t->pid);
88452 si.si_signo = SIGBUS;
88453 si.si_errno = 0;
88454 - si.si_addr = (void *)addr;
88455 + si.si_addr = (void __user *)addr;
88456 #ifdef __ARCH_SI_TRAPNO
88457 si.si_trapno = trapno;
88458 #endif
88459 @@ -762,7 +762,7 @@ static struct page_state {
88460 unsigned long res;
88461 char *msg;
88462 int (*action)(struct page *p, unsigned long pfn);
88463 -} error_states[] = {
88464 +} __do_const error_states[] = {
88465 { reserved, reserved, "reserved kernel", me_kernel },
88466 /*
88467 * free pages are specially detected outside this table:
88468 @@ -1053,7 +1053,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
88469 nr_pages = 1 << compound_order(hpage);
88470 else /* normal page or thp */
88471 nr_pages = 1;
88472 - atomic_long_add(nr_pages, &num_poisoned_pages);
88473 + atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
88474
88475 /*
88476 * We need/can do nothing about count=0 pages.
88477 @@ -1083,7 +1083,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
88478 if (!PageHWPoison(hpage)
88479 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
88480 || (p != hpage && TestSetPageHWPoison(hpage))) {
88481 - atomic_long_sub(nr_pages, &num_poisoned_pages);
88482 + atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
88483 return 0;
88484 }
88485 set_page_hwpoison_huge_page(hpage);
88486 @@ -1152,7 +1152,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
88487 }
88488 if (hwpoison_filter(p)) {
88489 if (TestClearPageHWPoison(p))
88490 - atomic_long_sub(nr_pages, &num_poisoned_pages);
88491 + atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
88492 unlock_page(hpage);
88493 put_page(hpage);
88494 return 0;
88495 @@ -1370,7 +1370,7 @@ int unpoison_memory(unsigned long pfn)
88496 return 0;
88497 }
88498 if (TestClearPageHWPoison(p))
88499 - atomic_long_dec(&num_poisoned_pages);
88500 + atomic_long_dec_unchecked(&num_poisoned_pages);
88501 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
88502 return 0;
88503 }
88504 @@ -1384,7 +1384,7 @@ int unpoison_memory(unsigned long pfn)
88505 */
88506 if (TestClearPageHWPoison(page)) {
88507 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
88508 - atomic_long_sub(nr_pages, &num_poisoned_pages);
88509 + atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
88510 freeit = 1;
88511 if (PageHuge(page))
88512 clear_page_hwpoison_huge_page(page);
88513 @@ -1521,7 +1521,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
88514 } else {
88515 set_page_hwpoison_huge_page(hpage);
88516 dequeue_hwpoisoned_huge_page(hpage);
88517 - atomic_long_add(1 << compound_order(hpage),
88518 + atomic_long_add_unchecked(1 << compound_order(hpage),
88519 &num_poisoned_pages);
88520 }
88521 return ret;
88522 @@ -1560,7 +1560,7 @@ static int __soft_offline_page(struct page *page, int flags)
88523 put_page(page);
88524 pr_info("soft_offline: %#lx: invalidated\n", pfn);
88525 SetPageHWPoison(page);
88526 - atomic_long_inc(&num_poisoned_pages);
88527 + atomic_long_inc_unchecked(&num_poisoned_pages);
88528 return 0;
88529 }
88530
88531 @@ -1605,7 +1605,7 @@ static int __soft_offline_page(struct page *page, int flags)
88532 if (!is_free_buddy_page(page))
88533 pr_info("soft offline: %#lx: page leaked\n",
88534 pfn);
88535 - atomic_long_inc(&num_poisoned_pages);
88536 + atomic_long_inc_unchecked(&num_poisoned_pages);
88537 }
88538 } else {
88539 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
88540 @@ -1666,11 +1666,11 @@ int soft_offline_page(struct page *page, int flags)
88541 if (PageHuge(page)) {
88542 set_page_hwpoison_huge_page(hpage);
88543 dequeue_hwpoisoned_huge_page(hpage);
88544 - atomic_long_add(1 << compound_order(hpage),
88545 + atomic_long_add_unchecked(1 << compound_order(hpage),
88546 &num_poisoned_pages);
88547 } else {
88548 SetPageHWPoison(page);
88549 - atomic_long_inc(&num_poisoned_pages);
88550 + atomic_long_inc_unchecked(&num_poisoned_pages);
88551 }
88552 }
88553 unset:
88554 diff --git a/mm/memory.c b/mm/memory.c
88555 index d176154..cd1b387 100644
88556 --- a/mm/memory.c
88557 +++ b/mm/memory.c
88558 @@ -402,6 +402,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
88559 free_pte_range(tlb, pmd, addr);
88560 } while (pmd++, addr = next, addr != end);
88561
88562 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
88563 start &= PUD_MASK;
88564 if (start < floor)
88565 return;
88566 @@ -416,6 +417,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
88567 pmd = pmd_offset(pud, start);
88568 pud_clear(pud);
88569 pmd_free_tlb(tlb, pmd, start);
88570 +#endif
88571 +
88572 }
88573
88574 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
88575 @@ -435,6 +438,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
88576 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
88577 } while (pud++, addr = next, addr != end);
88578
88579 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
88580 start &= PGDIR_MASK;
88581 if (start < floor)
88582 return;
88583 @@ -449,6 +453,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
88584 pud = pud_offset(pgd, start);
88585 pgd_clear(pgd);
88586 pud_free_tlb(tlb, pud, start);
88587 +#endif
88588 +
88589 }
88590
88591 /*
88592 @@ -1636,12 +1642,6 @@ no_page_table:
88593 return page;
88594 }
88595
88596 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
88597 -{
88598 - return stack_guard_page_start(vma, addr) ||
88599 - stack_guard_page_end(vma, addr+PAGE_SIZE);
88600 -}
88601 -
88602 /**
88603 * __get_user_pages() - pin user pages in memory
88604 * @tsk: task_struct of target task
88605 @@ -1728,10 +1728,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
88606
88607 i = 0;
88608
88609 - do {
88610 + while (nr_pages) {
88611 struct vm_area_struct *vma;
88612
88613 - vma = find_extend_vma(mm, start);
88614 + vma = find_vma(mm, start);
88615 if (!vma && in_gate_area(mm, start)) {
88616 unsigned long pg = start & PAGE_MASK;
88617 pgd_t *pgd;
88618 @@ -1780,7 +1780,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
88619 goto next_page;
88620 }
88621
88622 - if (!vma ||
88623 + if (!vma || start < vma->vm_start ||
88624 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
88625 !(vm_flags & vma->vm_flags))
88626 return i ? : -EFAULT;
88627 @@ -1809,11 +1809,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
88628 int ret;
88629 unsigned int fault_flags = 0;
88630
88631 - /* For mlock, just skip the stack guard page. */
88632 - if (foll_flags & FOLL_MLOCK) {
88633 - if (stack_guard_page(vma, start))
88634 - goto next_page;
88635 - }
88636 if (foll_flags & FOLL_WRITE)
88637 fault_flags |= FAULT_FLAG_WRITE;
88638 if (nonblocking)
88639 @@ -1893,7 +1888,7 @@ next_page:
88640 start += page_increm * PAGE_SIZE;
88641 nr_pages -= page_increm;
88642 } while (nr_pages && start < vma->vm_end);
88643 - } while (nr_pages);
88644 + }
88645 return i;
88646 }
88647 EXPORT_SYMBOL(__get_user_pages);
88648 @@ -2100,6 +2095,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
88649 page_add_file_rmap(page);
88650 set_pte_at(mm, addr, pte, mk_pte(page, prot));
88651
88652 +#ifdef CONFIG_PAX_SEGMEXEC
88653 + pax_mirror_file_pte(vma, addr, page, ptl);
88654 +#endif
88655 +
88656 retval = 0;
88657 pte_unmap_unlock(pte, ptl);
88658 return retval;
88659 @@ -2144,9 +2143,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
88660 if (!page_count(page))
88661 return -EINVAL;
88662 if (!(vma->vm_flags & VM_MIXEDMAP)) {
88663 +
88664 +#ifdef CONFIG_PAX_SEGMEXEC
88665 + struct vm_area_struct *vma_m;
88666 +#endif
88667 +
88668 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
88669 BUG_ON(vma->vm_flags & VM_PFNMAP);
88670 vma->vm_flags |= VM_MIXEDMAP;
88671 +
88672 +#ifdef CONFIG_PAX_SEGMEXEC
88673 + vma_m = pax_find_mirror_vma(vma);
88674 + if (vma_m)
88675 + vma_m->vm_flags |= VM_MIXEDMAP;
88676 +#endif
88677 +
88678 }
88679 return insert_page(vma, addr, page, vma->vm_page_prot);
88680 }
88681 @@ -2229,6 +2240,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
88682 unsigned long pfn)
88683 {
88684 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
88685 + BUG_ON(vma->vm_mirror);
88686
88687 if (addr < vma->vm_start || addr >= vma->vm_end)
88688 return -EFAULT;
88689 @@ -2476,7 +2488,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
88690
88691 BUG_ON(pud_huge(*pud));
88692
88693 - pmd = pmd_alloc(mm, pud, addr);
88694 + pmd = (mm == &init_mm) ?
88695 + pmd_alloc_kernel(mm, pud, addr) :
88696 + pmd_alloc(mm, pud, addr);
88697 if (!pmd)
88698 return -ENOMEM;
88699 do {
88700 @@ -2496,7 +2510,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
88701 unsigned long next;
88702 int err;
88703
88704 - pud = pud_alloc(mm, pgd, addr);
88705 + pud = (mm == &init_mm) ?
88706 + pud_alloc_kernel(mm, pgd, addr) :
88707 + pud_alloc(mm, pgd, addr);
88708 if (!pud)
88709 return -ENOMEM;
88710 do {
88711 @@ -2584,6 +2600,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
88712 copy_user_highpage(dst, src, va, vma);
88713 }
88714
88715 +#ifdef CONFIG_PAX_SEGMEXEC
88716 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
88717 +{
88718 + struct mm_struct *mm = vma->vm_mm;
88719 + spinlock_t *ptl;
88720 + pte_t *pte, entry;
88721 +
88722 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
88723 + entry = *pte;
88724 + if (!pte_present(entry)) {
88725 + if (!pte_none(entry)) {
88726 + BUG_ON(pte_file(entry));
88727 + free_swap_and_cache(pte_to_swp_entry(entry));
88728 + pte_clear_not_present_full(mm, address, pte, 0);
88729 + }
88730 + } else {
88731 + struct page *page;
88732 +
88733 + flush_cache_page(vma, address, pte_pfn(entry));
88734 + entry = ptep_clear_flush(vma, address, pte);
88735 + BUG_ON(pte_dirty(entry));
88736 + page = vm_normal_page(vma, address, entry);
88737 + if (page) {
88738 + update_hiwater_rss(mm);
88739 + if (PageAnon(page))
88740 + dec_mm_counter_fast(mm, MM_ANONPAGES);
88741 + else
88742 + dec_mm_counter_fast(mm, MM_FILEPAGES);
88743 + page_remove_rmap(page);
88744 + page_cache_release(page);
88745 + }
88746 + }
88747 + pte_unmap_unlock(pte, ptl);
88748 +}
88749 +
88750 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
88751 + *
88752 + * the ptl of the lower mapped page is held on entry and is not released on exit
88753 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
88754 + */
88755 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
88756 +{
88757 + struct mm_struct *mm = vma->vm_mm;
88758 + unsigned long address_m;
88759 + spinlock_t *ptl_m;
88760 + struct vm_area_struct *vma_m;
88761 + pmd_t *pmd_m;
88762 + pte_t *pte_m, entry_m;
88763 +
88764 + BUG_ON(!page_m || !PageAnon(page_m));
88765 +
88766 + vma_m = pax_find_mirror_vma(vma);
88767 + if (!vma_m)
88768 + return;
88769 +
88770 + BUG_ON(!PageLocked(page_m));
88771 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
88772 + address_m = address + SEGMEXEC_TASK_SIZE;
88773 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
88774 + pte_m = pte_offset_map(pmd_m, address_m);
88775 + ptl_m = pte_lockptr(mm, pmd_m);
88776 + if (ptl != ptl_m) {
88777 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
88778 + if (!pte_none(*pte_m))
88779 + goto out;
88780 + }
88781 +
88782 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
88783 + page_cache_get(page_m);
88784 + page_add_anon_rmap(page_m, vma_m, address_m);
88785 + inc_mm_counter_fast(mm, MM_ANONPAGES);
88786 + set_pte_at(mm, address_m, pte_m, entry_m);
88787 + update_mmu_cache(vma_m, address_m, pte_m);
88788 +out:
88789 + if (ptl != ptl_m)
88790 + spin_unlock(ptl_m);
88791 + pte_unmap(pte_m);
88792 + unlock_page(page_m);
88793 +}
88794 +
88795 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
88796 +{
88797 + struct mm_struct *mm = vma->vm_mm;
88798 + unsigned long address_m;
88799 + spinlock_t *ptl_m;
88800 + struct vm_area_struct *vma_m;
88801 + pmd_t *pmd_m;
88802 + pte_t *pte_m, entry_m;
88803 +
88804 + BUG_ON(!page_m || PageAnon(page_m));
88805 +
88806 + vma_m = pax_find_mirror_vma(vma);
88807 + if (!vma_m)
88808 + return;
88809 +
88810 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
88811 + address_m = address + SEGMEXEC_TASK_SIZE;
88812 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
88813 + pte_m = pte_offset_map(pmd_m, address_m);
88814 + ptl_m = pte_lockptr(mm, pmd_m);
88815 + if (ptl != ptl_m) {
88816 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
88817 + if (!pte_none(*pte_m))
88818 + goto out;
88819 + }
88820 +
88821 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
88822 + page_cache_get(page_m);
88823 + page_add_file_rmap(page_m);
88824 + inc_mm_counter_fast(mm, MM_FILEPAGES);
88825 + set_pte_at(mm, address_m, pte_m, entry_m);
88826 + update_mmu_cache(vma_m, address_m, pte_m);
88827 +out:
88828 + if (ptl != ptl_m)
88829 + spin_unlock(ptl_m);
88830 + pte_unmap(pte_m);
88831 +}
88832 +
88833 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
88834 +{
88835 + struct mm_struct *mm = vma->vm_mm;
88836 + unsigned long address_m;
88837 + spinlock_t *ptl_m;
88838 + struct vm_area_struct *vma_m;
88839 + pmd_t *pmd_m;
88840 + pte_t *pte_m, entry_m;
88841 +
88842 + vma_m = pax_find_mirror_vma(vma);
88843 + if (!vma_m)
88844 + return;
88845 +
88846 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
88847 + address_m = address + SEGMEXEC_TASK_SIZE;
88848 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
88849 + pte_m = pte_offset_map(pmd_m, address_m);
88850 + ptl_m = pte_lockptr(mm, pmd_m);
88851 + if (ptl != ptl_m) {
88852 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
88853 + if (!pte_none(*pte_m))
88854 + goto out;
88855 + }
88856 +
88857 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
88858 + set_pte_at(mm, address_m, pte_m, entry_m);
88859 +out:
88860 + if (ptl != ptl_m)
88861 + spin_unlock(ptl_m);
88862 + pte_unmap(pte_m);
88863 +}
88864 +
88865 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
88866 +{
88867 + struct page *page_m;
88868 + pte_t entry;
88869 +
88870 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
88871 + goto out;
88872 +
88873 + entry = *pte;
88874 + page_m = vm_normal_page(vma, address, entry);
88875 + if (!page_m)
88876 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
88877 + else if (PageAnon(page_m)) {
88878 + if (pax_find_mirror_vma(vma)) {
88879 + pte_unmap_unlock(pte, ptl);
88880 + lock_page(page_m);
88881 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
88882 + if (pte_same(entry, *pte))
88883 + pax_mirror_anon_pte(vma, address, page_m, ptl);
88884 + else
88885 + unlock_page(page_m);
88886 + }
88887 + } else
88888 + pax_mirror_file_pte(vma, address, page_m, ptl);
88889 +
88890 +out:
88891 + pte_unmap_unlock(pte, ptl);
88892 +}
88893 +#endif
88894 +
88895 /*
88896 * This routine handles present pages, when users try to write
88897 * to a shared page. It is done by copying the page to a new address
88898 @@ -2800,6 +2996,12 @@ gotten:
88899 */
88900 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
88901 if (likely(pte_same(*page_table, orig_pte))) {
88902 +
88903 +#ifdef CONFIG_PAX_SEGMEXEC
88904 + if (pax_find_mirror_vma(vma))
88905 + BUG_ON(!trylock_page(new_page));
88906 +#endif
88907 +
88908 if (old_page) {
88909 if (!PageAnon(old_page)) {
88910 dec_mm_counter_fast(mm, MM_FILEPAGES);
88911 @@ -2851,6 +3053,10 @@ gotten:
88912 page_remove_rmap(old_page);
88913 }
88914
88915 +#ifdef CONFIG_PAX_SEGMEXEC
88916 + pax_mirror_anon_pte(vma, address, new_page, ptl);
88917 +#endif
88918 +
88919 /* Free the old page.. */
88920 new_page = old_page;
88921 ret |= VM_FAULT_WRITE;
88922 @@ -3128,6 +3334,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
88923 swap_free(entry);
88924 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
88925 try_to_free_swap(page);
88926 +
88927 +#ifdef CONFIG_PAX_SEGMEXEC
88928 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
88929 +#endif
88930 +
88931 unlock_page(page);
88932 if (page != swapcache) {
88933 /*
88934 @@ -3151,6 +3362,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
88935
88936 /* No need to invalidate - it was non-present before */
88937 update_mmu_cache(vma, address, page_table);
88938 +
88939 +#ifdef CONFIG_PAX_SEGMEXEC
88940 + pax_mirror_anon_pte(vma, address, page, ptl);
88941 +#endif
88942 +
88943 unlock:
88944 pte_unmap_unlock(page_table, ptl);
88945 out:
88946 @@ -3170,40 +3386,6 @@ out_release:
88947 }
88948
88949 /*
88950 - * This is like a special single-page "expand_{down|up}wards()",
88951 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
88952 - * doesn't hit another vma.
88953 - */
88954 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
88955 -{
88956 - address &= PAGE_MASK;
88957 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
88958 - struct vm_area_struct *prev = vma->vm_prev;
88959 -
88960 - /*
88961 - * Is there a mapping abutting this one below?
88962 - *
88963 - * That's only ok if it's the same stack mapping
88964 - * that has gotten split..
88965 - */
88966 - if (prev && prev->vm_end == address)
88967 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
88968 -
88969 - expand_downwards(vma, address - PAGE_SIZE);
88970 - }
88971 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
88972 - struct vm_area_struct *next = vma->vm_next;
88973 -
88974 - /* As VM_GROWSDOWN but s/below/above/ */
88975 - if (next && next->vm_start == address + PAGE_SIZE)
88976 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
88977 -
88978 - expand_upwards(vma, address + PAGE_SIZE);
88979 - }
88980 - return 0;
88981 -}
88982 -
88983 -/*
88984 * We enter with non-exclusive mmap_sem (to exclude vma changes,
88985 * but allow concurrent faults), and pte mapped but not yet locked.
88986 * We return with mmap_sem still held, but pte unmapped and unlocked.
88987 @@ -3212,27 +3394,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
88988 unsigned long address, pte_t *page_table, pmd_t *pmd,
88989 unsigned int flags)
88990 {
88991 - struct page *page;
88992 + struct page *page = NULL;
88993 spinlock_t *ptl;
88994 pte_t entry;
88995
88996 - pte_unmap(page_table);
88997 -
88998 - /* Check if we need to add a guard page to the stack */
88999 - if (check_stack_guard_page(vma, address) < 0)
89000 - return VM_FAULT_SIGBUS;
89001 -
89002 - /* Use the zero-page for reads */
89003 if (!(flags & FAULT_FLAG_WRITE)) {
89004 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
89005 vma->vm_page_prot));
89006 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
89007 + ptl = pte_lockptr(mm, pmd);
89008 + spin_lock(ptl);
89009 if (!pte_none(*page_table))
89010 goto unlock;
89011 goto setpte;
89012 }
89013
89014 /* Allocate our own private page. */
89015 + pte_unmap(page_table);
89016 +
89017 if (unlikely(anon_vma_prepare(vma)))
89018 goto oom;
89019 page = alloc_zeroed_user_highpage_movable(vma, address);
89020 @@ -3256,6 +3434,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
89021 if (!pte_none(*page_table))
89022 goto release;
89023
89024 +#ifdef CONFIG_PAX_SEGMEXEC
89025 + if (pax_find_mirror_vma(vma))
89026 + BUG_ON(!trylock_page(page));
89027 +#endif
89028 +
89029 inc_mm_counter_fast(mm, MM_ANONPAGES);
89030 page_add_new_anon_rmap(page, vma, address);
89031 setpte:
89032 @@ -3263,6 +3446,12 @@ setpte:
89033
89034 /* No need to invalidate - it was non-present before */
89035 update_mmu_cache(vma, address, page_table);
89036 +
89037 +#ifdef CONFIG_PAX_SEGMEXEC
89038 + if (page)
89039 + pax_mirror_anon_pte(vma, address, page, ptl);
89040 +#endif
89041 +
89042 unlock:
89043 pte_unmap_unlock(page_table, ptl);
89044 return 0;
89045 @@ -3406,6 +3595,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
89046 */
89047 /* Only go through if we didn't race with anybody else... */
89048 if (likely(pte_same(*page_table, orig_pte))) {
89049 +
89050 +#ifdef CONFIG_PAX_SEGMEXEC
89051 + if (anon && pax_find_mirror_vma(vma))
89052 + BUG_ON(!trylock_page(page));
89053 +#endif
89054 +
89055 flush_icache_page(vma, page);
89056 entry = mk_pte(page, vma->vm_page_prot);
89057 if (flags & FAULT_FLAG_WRITE)
89058 @@ -3427,6 +3622,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
89059
89060 /* no need to invalidate: a not-present page won't be cached */
89061 update_mmu_cache(vma, address, page_table);
89062 +
89063 +#ifdef CONFIG_PAX_SEGMEXEC
89064 + if (anon)
89065 + pax_mirror_anon_pte(vma, address, page, ptl);
89066 + else
89067 + pax_mirror_file_pte(vma, address, page, ptl);
89068 +#endif
89069 +
89070 } else {
89071 if (cow_page)
89072 mem_cgroup_uncharge_page(cow_page);
89073 @@ -3737,6 +3940,12 @@ static int handle_pte_fault(struct mm_struct *mm,
89074 if (flags & FAULT_FLAG_WRITE)
89075 flush_tlb_fix_spurious_fault(vma, address);
89076 }
89077 +
89078 +#ifdef CONFIG_PAX_SEGMEXEC
89079 + pax_mirror_pte(vma, address, pte, pmd, ptl);
89080 + return 0;
89081 +#endif
89082 +
89083 unlock:
89084 pte_unmap_unlock(pte, ptl);
89085 return 0;
89086 @@ -3753,9 +3962,41 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
89087 pmd_t *pmd;
89088 pte_t *pte;
89089
89090 +#ifdef CONFIG_PAX_SEGMEXEC
89091 + struct vm_area_struct *vma_m;
89092 +#endif
89093 +
89094 if (unlikely(is_vm_hugetlb_page(vma)))
89095 return hugetlb_fault(mm, vma, address, flags);
89096
89097 +#ifdef CONFIG_PAX_SEGMEXEC
89098 + vma_m = pax_find_mirror_vma(vma);
89099 + if (vma_m) {
89100 + unsigned long address_m;
89101 + pgd_t *pgd_m;
89102 + pud_t *pud_m;
89103 + pmd_t *pmd_m;
89104 +
89105 + if (vma->vm_start > vma_m->vm_start) {
89106 + address_m = address;
89107 + address -= SEGMEXEC_TASK_SIZE;
89108 + vma = vma_m;
89109 + } else
89110 + address_m = address + SEGMEXEC_TASK_SIZE;
89111 +
89112 + pgd_m = pgd_offset(mm, address_m);
89113 + pud_m = pud_alloc(mm, pgd_m, address_m);
89114 + if (!pud_m)
89115 + return VM_FAULT_OOM;
89116 + pmd_m = pmd_alloc(mm, pud_m, address_m);
89117 + if (!pmd_m)
89118 + return VM_FAULT_OOM;
89119 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
89120 + return VM_FAULT_OOM;
89121 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
89122 + }
89123 +#endif
89124 +
89125 retry:
89126 pgd = pgd_offset(mm, address);
89127 pud = pud_alloc(mm, pgd, address);
89128 @@ -3894,6 +4135,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
89129 spin_unlock(&mm->page_table_lock);
89130 return 0;
89131 }
89132 +
89133 +int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
89134 +{
89135 + pud_t *new = pud_alloc_one(mm, address);
89136 + if (!new)
89137 + return -ENOMEM;
89138 +
89139 + smp_wmb(); /* See comment in __pte_alloc */
89140 +
89141 + spin_lock(&mm->page_table_lock);
89142 + if (pgd_present(*pgd)) /* Another has populated it */
89143 + pud_free(mm, new);
89144 + else
89145 + pgd_populate_kernel(mm, pgd, new);
89146 + spin_unlock(&mm->page_table_lock);
89147 + return 0;
89148 +}
89149 #endif /* __PAGETABLE_PUD_FOLDED */
89150
89151 #ifndef __PAGETABLE_PMD_FOLDED
89152 @@ -3924,6 +4182,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
89153 spin_unlock(&mm->page_table_lock);
89154 return 0;
89155 }
89156 +
89157 +int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
89158 +{
89159 + pmd_t *new = pmd_alloc_one(mm, address);
89160 + if (!new)
89161 + return -ENOMEM;
89162 +
89163 + smp_wmb(); /* See comment in __pte_alloc */
89164 +
89165 + spin_lock(&mm->page_table_lock);
89166 +#ifndef __ARCH_HAS_4LEVEL_HACK
89167 + if (pud_present(*pud)) /* Another has populated it */
89168 + pmd_free(mm, new);
89169 + else
89170 + pud_populate_kernel(mm, pud, new);
89171 +#else
89172 + if (pgd_present(*pud)) /* Another has populated it */
89173 + pmd_free(mm, new);
89174 + else
89175 + pgd_populate_kernel(mm, pud, new);
89176 +#endif /* __ARCH_HAS_4LEVEL_HACK */
89177 + spin_unlock(&mm->page_table_lock);
89178 + return 0;
89179 +}
89180 #endif /* __PAGETABLE_PMD_FOLDED */
89181
89182 #if !defined(__HAVE_ARCH_GATE_AREA)
89183 @@ -3937,7 +4219,7 @@ static int __init gate_vma_init(void)
89184 gate_vma.vm_start = FIXADDR_USER_START;
89185 gate_vma.vm_end = FIXADDR_USER_END;
89186 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
89187 - gate_vma.vm_page_prot = __P101;
89188 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
89189
89190 return 0;
89191 }
89192 @@ -4071,8 +4353,8 @@ out:
89193 return ret;
89194 }
89195
89196 -int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
89197 - void *buf, int len, int write)
89198 +ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
89199 + void *buf, size_t len, int write)
89200 {
89201 resource_size_t phys_addr;
89202 unsigned long prot = 0;
89203 @@ -4098,8 +4380,8 @@ EXPORT_SYMBOL_GPL(generic_access_phys);
89204 * Access another process' address space as given in mm. If non-NULL, use the
89205 * given task for page fault accounting.
89206 */
89207 -static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
89208 - unsigned long addr, void *buf, int len, int write)
89209 +static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
89210 + unsigned long addr, void *buf, size_t len, int write)
89211 {
89212 struct vm_area_struct *vma;
89213 void *old_buf = buf;
89214 @@ -4107,7 +4389,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
89215 down_read(&mm->mmap_sem);
89216 /* ignore errors, just check how much was successfully transferred */
89217 while (len) {
89218 - int bytes, ret, offset;
89219 + ssize_t bytes, ret, offset;
89220 void *maddr;
89221 struct page *page = NULL;
89222
89223 @@ -4166,8 +4448,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
89224 *
89225 * The caller must hold a reference on @mm.
89226 */
89227 -int access_remote_vm(struct mm_struct *mm, unsigned long addr,
89228 - void *buf, int len, int write)
89229 +ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
89230 + void *buf, size_t len, int write)
89231 {
89232 return __access_remote_vm(NULL, mm, addr, buf, len, write);
89233 }
89234 @@ -4177,11 +4459,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
89235 * Source/target buffer must be kernel space,
89236 * Do not walk the page table directly, use get_user_pages
89237 */
89238 -int access_process_vm(struct task_struct *tsk, unsigned long addr,
89239 - void *buf, int len, int write)
89240 +ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
89241 + void *buf, size_t len, int write)
89242 {
89243 struct mm_struct *mm;
89244 - int ret;
89245 + ssize_t ret;
89246
89247 mm = get_task_mm(tsk);
89248 if (!mm)
89249 diff --git a/mm/mempolicy.c b/mm/mempolicy.c
89250 index 0472964..7d5a0ea 100644
89251 --- a/mm/mempolicy.c
89252 +++ b/mm/mempolicy.c
89253 @@ -746,6 +746,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
89254 unsigned long vmstart;
89255 unsigned long vmend;
89256
89257 +#ifdef CONFIG_PAX_SEGMEXEC
89258 + struct vm_area_struct *vma_m;
89259 +#endif
89260 +
89261 vma = find_vma(mm, start);
89262 if (!vma || vma->vm_start > start)
89263 return -EFAULT;
89264 @@ -789,6 +793,16 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
89265 err = vma_replace_policy(vma, new_pol);
89266 if (err)
89267 goto out;
89268 +
89269 +#ifdef CONFIG_PAX_SEGMEXEC
89270 + vma_m = pax_find_mirror_vma(vma);
89271 + if (vma_m) {
89272 + err = vma_replace_policy(vma_m, new_pol);
89273 + if (err)
89274 + goto out;
89275 + }
89276 +#endif
89277 +
89278 }
89279
89280 out:
89281 @@ -1252,6 +1266,17 @@ static long do_mbind(unsigned long start, unsigned long len,
89282
89283 if (end < start)
89284 return -EINVAL;
89285 +
89286 +#ifdef CONFIG_PAX_SEGMEXEC
89287 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
89288 + if (end > SEGMEXEC_TASK_SIZE)
89289 + return -EINVAL;
89290 + } else
89291 +#endif
89292 +
89293 + if (end > TASK_SIZE)
89294 + return -EINVAL;
89295 +
89296 if (end == start)
89297 return 0;
89298
89299 @@ -1480,8 +1505,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
89300 */
89301 tcred = __task_cred(task);
89302 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
89303 - !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
89304 - !capable(CAP_SYS_NICE)) {
89305 + !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
89306 rcu_read_unlock();
89307 err = -EPERM;
89308 goto out_put;
89309 @@ -1512,6 +1536,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
89310 goto out;
89311 }
89312
89313 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
89314 + if (mm != current->mm &&
89315 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
89316 + mmput(mm);
89317 + err = -EPERM;
89318 + goto out;
89319 + }
89320 +#endif
89321 +
89322 err = do_migrate_pages(mm, old, new,
89323 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
89324
89325 diff --git a/mm/migrate.c b/mm/migrate.c
89326 index c046927..6996b40 100644
89327 --- a/mm/migrate.c
89328 +++ b/mm/migrate.c
89329 @@ -1404,8 +1404,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
89330 */
89331 tcred = __task_cred(task);
89332 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
89333 - !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
89334 - !capable(CAP_SYS_NICE)) {
89335 + !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
89336 rcu_read_unlock();
89337 err = -EPERM;
89338 goto out;
89339 diff --git a/mm/mlock.c b/mm/mlock.c
89340 index d480cd6..0f98458 100644
89341 --- a/mm/mlock.c
89342 +++ b/mm/mlock.c
89343 @@ -14,6 +14,7 @@
89344 #include <linux/pagevec.h>
89345 #include <linux/mempolicy.h>
89346 #include <linux/syscalls.h>
89347 +#include <linux/security.h>
89348 #include <linux/sched.h>
89349 #include <linux/export.h>
89350 #include <linux/rmap.h>
89351 @@ -568,7 +569,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
89352 {
89353 unsigned long nstart, end, tmp;
89354 struct vm_area_struct * vma, * prev;
89355 - int error;
89356 + int error = 0;
89357
89358 VM_BUG_ON(start & ~PAGE_MASK);
89359 VM_BUG_ON(len != PAGE_ALIGN(len));
89360 @@ -577,6 +578,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
89361 return -EINVAL;
89362 if (end == start)
89363 return 0;
89364 + if (end > TASK_SIZE)
89365 + return -EINVAL;
89366 +
89367 vma = find_vma(current->mm, start);
89368 if (!vma || vma->vm_start > start)
89369 return -ENOMEM;
89370 @@ -588,6 +592,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
89371 for (nstart = start ; ; ) {
89372 vm_flags_t newflags;
89373
89374 +#ifdef CONFIG_PAX_SEGMEXEC
89375 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
89376 + break;
89377 +#endif
89378 +
89379 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
89380
89381 newflags = vma->vm_flags & ~VM_LOCKED;
89382 @@ -700,6 +709,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
89383 lock_limit >>= PAGE_SHIFT;
89384
89385 /* check against resource limits */
89386 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
89387 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
89388 error = do_mlock(start, len, 1);
89389 up_write(&current->mm->mmap_sem);
89390 @@ -734,6 +744,11 @@ static int do_mlockall(int flags)
89391 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
89392 vm_flags_t newflags;
89393
89394 +#ifdef CONFIG_PAX_SEGMEXEC
89395 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
89396 + break;
89397 +#endif
89398 +
89399 newflags = vma->vm_flags & ~VM_LOCKED;
89400 if (flags & MCL_CURRENT)
89401 newflags |= VM_LOCKED;
89402 @@ -767,6 +782,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
89403 lock_limit >>= PAGE_SHIFT;
89404
89405 ret = -ENOMEM;
89406 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
89407 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
89408 capable(CAP_IPC_LOCK))
89409 ret = do_mlockall(flags);
89410 diff --git a/mm/mmap.c b/mm/mmap.c
89411 index 362e5f1..8968e02 100644
89412 --- a/mm/mmap.c
89413 +++ b/mm/mmap.c
89414 @@ -36,6 +36,7 @@
89415 #include <linux/sched/sysctl.h>
89416 #include <linux/notifier.h>
89417 #include <linux/memory.h>
89418 +#include <linux/random.h>
89419
89420 #include <asm/uaccess.h>
89421 #include <asm/cacheflush.h>
89422 @@ -52,6 +53,16 @@
89423 #define arch_rebalance_pgtables(addr, len) (addr)
89424 #endif
89425
89426 +static inline void verify_mm_writelocked(struct mm_struct *mm)
89427 +{
89428 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
89429 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
89430 + up_read(&mm->mmap_sem);
89431 + BUG();
89432 + }
89433 +#endif
89434 +}
89435 +
89436 static void unmap_region(struct mm_struct *mm,
89437 struct vm_area_struct *vma, struct vm_area_struct *prev,
89438 unsigned long start, unsigned long end);
89439 @@ -71,16 +82,25 @@ static void unmap_region(struct mm_struct *mm,
89440 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
89441 *
89442 */
89443 -pgprot_t protection_map[16] = {
89444 +pgprot_t protection_map[16] __read_only = {
89445 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
89446 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
89447 };
89448
89449 -pgprot_t vm_get_page_prot(unsigned long vm_flags)
89450 +pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
89451 {
89452 - return __pgprot(pgprot_val(protection_map[vm_flags &
89453 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
89454 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
89455 pgprot_val(arch_vm_get_page_prot(vm_flags)));
89456 +
89457 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
89458 + if (!(__supported_pte_mask & _PAGE_NX) &&
89459 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
89460 + (vm_flags & (VM_READ | VM_WRITE)))
89461 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
89462 +#endif
89463 +
89464 + return prot;
89465 }
89466 EXPORT_SYMBOL(vm_get_page_prot);
89467
89468 @@ -89,6 +109,7 @@ int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
89469 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
89470 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
89471 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
89472 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
89473 /*
89474 * Make sure vm_committed_as in one cacheline and not cacheline shared with
89475 * other variables. It can be updated by several CPUs frequently.
89476 @@ -247,6 +268,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
89477 struct vm_area_struct *next = vma->vm_next;
89478
89479 might_sleep();
89480 + BUG_ON(vma->vm_mirror);
89481 if (vma->vm_ops && vma->vm_ops->close)
89482 vma->vm_ops->close(vma);
89483 if (vma->vm_file)
89484 @@ -291,6 +313,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
89485 * not page aligned -Ram Gupta
89486 */
89487 rlim = rlimit(RLIMIT_DATA);
89488 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
89489 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
89490 (mm->end_data - mm->start_data) > rlim)
89491 goto out;
89492 @@ -933,6 +956,12 @@ static int
89493 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
89494 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
89495 {
89496 +
89497 +#ifdef CONFIG_PAX_SEGMEXEC
89498 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
89499 + return 0;
89500 +#endif
89501 +
89502 if (is_mergeable_vma(vma, file, vm_flags) &&
89503 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
89504 if (vma->vm_pgoff == vm_pgoff)
89505 @@ -952,6 +981,12 @@ static int
89506 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
89507 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
89508 {
89509 +
89510 +#ifdef CONFIG_PAX_SEGMEXEC
89511 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
89512 + return 0;
89513 +#endif
89514 +
89515 if (is_mergeable_vma(vma, file, vm_flags) &&
89516 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
89517 pgoff_t vm_pglen;
89518 @@ -994,13 +1029,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
89519 struct vm_area_struct *vma_merge(struct mm_struct *mm,
89520 struct vm_area_struct *prev, unsigned long addr,
89521 unsigned long end, unsigned long vm_flags,
89522 - struct anon_vma *anon_vma, struct file *file,
89523 + struct anon_vma *anon_vma, struct file *file,
89524 pgoff_t pgoff, struct mempolicy *policy)
89525 {
89526 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
89527 struct vm_area_struct *area, *next;
89528 int err;
89529
89530 +#ifdef CONFIG_PAX_SEGMEXEC
89531 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
89532 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
89533 +
89534 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
89535 +#endif
89536 +
89537 /*
89538 * We later require that vma->vm_flags == vm_flags,
89539 * so this tests vma->vm_flags & VM_SPECIAL, too.
89540 @@ -1016,6 +1058,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
89541 if (next && next->vm_end == end) /* cases 6, 7, 8 */
89542 next = next->vm_next;
89543
89544 +#ifdef CONFIG_PAX_SEGMEXEC
89545 + if (prev)
89546 + prev_m = pax_find_mirror_vma(prev);
89547 + if (area)
89548 + area_m = pax_find_mirror_vma(area);
89549 + if (next)
89550 + next_m = pax_find_mirror_vma(next);
89551 +#endif
89552 +
89553 /*
89554 * Can it merge with the predecessor?
89555 */
89556 @@ -1035,9 +1086,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
89557 /* cases 1, 6 */
89558 err = vma_adjust(prev, prev->vm_start,
89559 next->vm_end, prev->vm_pgoff, NULL);
89560 - } else /* cases 2, 5, 7 */
89561 +
89562 +#ifdef CONFIG_PAX_SEGMEXEC
89563 + if (!err && prev_m)
89564 + err = vma_adjust(prev_m, prev_m->vm_start,
89565 + next_m->vm_end, prev_m->vm_pgoff, NULL);
89566 +#endif
89567 +
89568 + } else { /* cases 2, 5, 7 */
89569 err = vma_adjust(prev, prev->vm_start,
89570 end, prev->vm_pgoff, NULL);
89571 +
89572 +#ifdef CONFIG_PAX_SEGMEXEC
89573 + if (!err && prev_m)
89574 + err = vma_adjust(prev_m, prev_m->vm_start,
89575 + end_m, prev_m->vm_pgoff, NULL);
89576 +#endif
89577 +
89578 + }
89579 if (err)
89580 return NULL;
89581 khugepaged_enter_vma_merge(prev);
89582 @@ -1051,12 +1117,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
89583 mpol_equal(policy, vma_policy(next)) &&
89584 can_vma_merge_before(next, vm_flags,
89585 anon_vma, file, pgoff+pglen)) {
89586 - if (prev && addr < prev->vm_end) /* case 4 */
89587 + if (prev && addr < prev->vm_end) { /* case 4 */
89588 err = vma_adjust(prev, prev->vm_start,
89589 addr, prev->vm_pgoff, NULL);
89590 - else /* cases 3, 8 */
89591 +
89592 +#ifdef CONFIG_PAX_SEGMEXEC
89593 + if (!err && prev_m)
89594 + err = vma_adjust(prev_m, prev_m->vm_start,
89595 + addr_m, prev_m->vm_pgoff, NULL);
89596 +#endif
89597 +
89598 + } else { /* cases 3, 8 */
89599 err = vma_adjust(area, addr, next->vm_end,
89600 next->vm_pgoff - pglen, NULL);
89601 +
89602 +#ifdef CONFIG_PAX_SEGMEXEC
89603 + if (!err && area_m)
89604 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
89605 + next_m->vm_pgoff - pglen, NULL);
89606 +#endif
89607 +
89608 + }
89609 if (err)
89610 return NULL;
89611 khugepaged_enter_vma_merge(area);
89612 @@ -1165,8 +1246,10 @@ none:
89613 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
89614 struct file *file, long pages)
89615 {
89616 - const unsigned long stack_flags
89617 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
89618 +
89619 +#ifdef CONFIG_PAX_RANDMMAP
89620 + if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
89621 +#endif
89622
89623 mm->total_vm += pages;
89624
89625 @@ -1174,7 +1257,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
89626 mm->shared_vm += pages;
89627 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
89628 mm->exec_vm += pages;
89629 - } else if (flags & stack_flags)
89630 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
89631 mm->stack_vm += pages;
89632 }
89633 #endif /* CONFIG_PROC_FS */
89634 @@ -1212,7 +1295,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
89635 * (the exception is when the underlying filesystem is noexec
89636 * mounted, in which case we dont add PROT_EXEC.)
89637 */
89638 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
89639 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
89640 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
89641 prot |= PROT_EXEC;
89642
89643 @@ -1238,7 +1321,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
89644 /* Obtain the address to map to. we verify (or select) it and ensure
89645 * that it represents a valid section of the address space.
89646 */
89647 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
89648 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
89649 if (addr & ~PAGE_MASK)
89650 return addr;
89651
89652 @@ -1249,6 +1332,43 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
89653 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
89654 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
89655
89656 +#ifdef CONFIG_PAX_MPROTECT
89657 + if (mm->pax_flags & MF_PAX_MPROTECT) {
89658 +
89659 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
89660 + if (file && !pgoff && (vm_flags & VM_EXEC) && mm->binfmt &&
89661 + mm->binfmt->handle_mmap)
89662 + mm->binfmt->handle_mmap(file);
89663 +#endif
89664 +
89665 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
89666 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
89667 + gr_log_rwxmmap(file);
89668 +
89669 +#ifdef CONFIG_PAX_EMUPLT
89670 + vm_flags &= ~VM_EXEC;
89671 +#else
89672 + return -EPERM;
89673 +#endif
89674 +
89675 + }
89676 +
89677 + if (!(vm_flags & VM_EXEC))
89678 + vm_flags &= ~VM_MAYEXEC;
89679 +#else
89680 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
89681 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
89682 +#endif
89683 + else
89684 + vm_flags &= ~VM_MAYWRITE;
89685 + }
89686 +#endif
89687 +
89688 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
89689 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
89690 + vm_flags &= ~VM_PAGEEXEC;
89691 +#endif
89692 +
89693 if (flags & MAP_LOCKED)
89694 if (!can_do_mlock())
89695 return -EPERM;
89696 @@ -1260,6 +1380,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
89697 locked += mm->locked_vm;
89698 lock_limit = rlimit(RLIMIT_MEMLOCK);
89699 lock_limit >>= PAGE_SHIFT;
89700 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
89701 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
89702 return -EAGAIN;
89703 }
89704 @@ -1344,6 +1465,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
89705 vm_flags |= VM_NORESERVE;
89706 }
89707
89708 + if (!gr_acl_handle_mmap(file, prot))
89709 + return -EACCES;
89710 +
89711 addr = mmap_region(file, addr, len, vm_flags, pgoff);
89712 if (!IS_ERR_VALUE(addr) &&
89713 ((vm_flags & VM_LOCKED) ||
89714 @@ -1437,7 +1561,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
89715 vm_flags_t vm_flags = vma->vm_flags;
89716
89717 /* If it was private or non-writable, the write bit is already clear */
89718 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
89719 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
89720 return 0;
89721
89722 /* The backer wishes to know when pages are first written to? */
89723 @@ -1483,7 +1607,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
89724 struct rb_node **rb_link, *rb_parent;
89725 unsigned long charged = 0;
89726
89727 +#ifdef CONFIG_PAX_SEGMEXEC
89728 + struct vm_area_struct *vma_m = NULL;
89729 +#endif
89730 +
89731 + /*
89732 + * mm->mmap_sem is required to protect against another thread
89733 + * changing the mappings in case we sleep.
89734 + */
89735 + verify_mm_writelocked(mm);
89736 +
89737 /* Check against address space limit. */
89738 +
89739 +#ifdef CONFIG_PAX_RANDMMAP
89740 + if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
89741 +#endif
89742 +
89743 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
89744 unsigned long nr_pages;
89745
89746 @@ -1502,11 +1641,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
89747
89748 /* Clear old maps */
89749 error = -ENOMEM;
89750 -munmap_back:
89751 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
89752 if (do_munmap(mm, addr, len))
89753 return -ENOMEM;
89754 - goto munmap_back;
89755 + BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
89756 }
89757
89758 /*
89759 @@ -1537,6 +1675,16 @@ munmap_back:
89760 goto unacct_error;
89761 }
89762
89763 +#ifdef CONFIG_PAX_SEGMEXEC
89764 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
89765 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
89766 + if (!vma_m) {
89767 + error = -ENOMEM;
89768 + goto free_vma;
89769 + }
89770 + }
89771 +#endif
89772 +
89773 vma->vm_mm = mm;
89774 vma->vm_start = addr;
89775 vma->vm_end = addr + len;
89776 @@ -1556,6 +1704,13 @@ munmap_back:
89777 if (error)
89778 goto unmap_and_free_vma;
89779
89780 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
89781 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
89782 + vma->vm_flags |= VM_PAGEEXEC;
89783 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
89784 + }
89785 +#endif
89786 +
89787 /* Can addr have changed??
89788 *
89789 * Answer: Yes, several device drivers can do it in their
89790 @@ -1589,6 +1744,12 @@ munmap_back:
89791 }
89792
89793 vma_link(mm, vma, prev, rb_link, rb_parent);
89794 +
89795 +#ifdef CONFIG_PAX_SEGMEXEC
89796 + if (vma_m)
89797 + BUG_ON(pax_mirror_vma(vma_m, vma));
89798 +#endif
89799 +
89800 /* Once vma denies write, undo our temporary denial count */
89801 if (vm_flags & VM_DENYWRITE)
89802 allow_write_access(file);
89803 @@ -1597,6 +1758,7 @@ out:
89804 perf_event_mmap(vma);
89805
89806 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
89807 + track_exec_limit(mm, addr, addr + len, vm_flags);
89808 if (vm_flags & VM_LOCKED) {
89809 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
89810 vma == get_gate_vma(current->mm)))
89811 @@ -1629,6 +1791,12 @@ unmap_and_free_vma:
89812 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
89813 charged = 0;
89814 free_vma:
89815 +
89816 +#ifdef CONFIG_PAX_SEGMEXEC
89817 + if (vma_m)
89818 + kmem_cache_free(vm_area_cachep, vma_m);
89819 +#endif
89820 +
89821 kmem_cache_free(vm_area_cachep, vma);
89822 unacct_error:
89823 if (charged)
89824 @@ -1636,7 +1804,63 @@ unacct_error:
89825 return error;
89826 }
89827
89828 -unsigned long unmapped_area(struct vm_unmapped_area_info *info)
89829 +#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
89830 +unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
89831 +{
89832 + if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
89833 + return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
89834 +
89835 + return 0;
89836 +}
89837 +#endif
89838 +
89839 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
89840 +{
89841 + if (!vma) {
89842 +#ifdef CONFIG_STACK_GROWSUP
89843 + if (addr > sysctl_heap_stack_gap)
89844 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
89845 + else
89846 + vma = find_vma(current->mm, 0);
89847 + if (vma && (vma->vm_flags & VM_GROWSUP))
89848 + return false;
89849 +#endif
89850 + return true;
89851 + }
89852 +
89853 + if (addr + len > vma->vm_start)
89854 + return false;
89855 +
89856 + if (vma->vm_flags & VM_GROWSDOWN)
89857 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
89858 +#ifdef CONFIG_STACK_GROWSUP
89859 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
89860 + return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
89861 +#endif
89862 + else if (offset)
89863 + return offset <= vma->vm_start - addr - len;
89864 +
89865 + return true;
89866 +}
89867 +
89868 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
89869 +{
89870 + if (vma->vm_start < len)
89871 + return -ENOMEM;
89872 +
89873 + if (!(vma->vm_flags & VM_GROWSDOWN)) {
89874 + if (offset <= vma->vm_start - len)
89875 + return vma->vm_start - len - offset;
89876 + else
89877 + return -ENOMEM;
89878 + }
89879 +
89880 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
89881 + return vma->vm_start - len - sysctl_heap_stack_gap;
89882 + return -ENOMEM;
89883 +}
89884 +
89885 +unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
89886 {
89887 /*
89888 * We implement the search by looking for an rbtree node that
89889 @@ -1684,11 +1908,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
89890 }
89891 }
89892
89893 - gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
89894 + gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
89895 check_current:
89896 /* Check if current node has a suitable gap */
89897 if (gap_start > high_limit)
89898 return -ENOMEM;
89899 +
89900 + if (gap_end - gap_start > info->threadstack_offset)
89901 + gap_start += info->threadstack_offset;
89902 + else
89903 + gap_start = gap_end;
89904 +
89905 + if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
89906 + if (gap_end - gap_start > sysctl_heap_stack_gap)
89907 + gap_start += sysctl_heap_stack_gap;
89908 + else
89909 + gap_start = gap_end;
89910 + }
89911 + if (vma->vm_flags & VM_GROWSDOWN) {
89912 + if (gap_end - gap_start > sysctl_heap_stack_gap)
89913 + gap_end -= sysctl_heap_stack_gap;
89914 + else
89915 + gap_end = gap_start;
89916 + }
89917 if (gap_end >= low_limit && gap_end - gap_start >= length)
89918 goto found;
89919
89920 @@ -1738,7 +1980,7 @@ found:
89921 return gap_start;
89922 }
89923
89924 -unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
89925 +unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
89926 {
89927 struct mm_struct *mm = current->mm;
89928 struct vm_area_struct *vma;
89929 @@ -1792,6 +2034,24 @@ check_current:
89930 gap_end = vma->vm_start;
89931 if (gap_end < low_limit)
89932 return -ENOMEM;
89933 +
89934 + if (gap_end - gap_start > info->threadstack_offset)
89935 + gap_end -= info->threadstack_offset;
89936 + else
89937 + gap_end = gap_start;
89938 +
89939 + if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
89940 + if (gap_end - gap_start > sysctl_heap_stack_gap)
89941 + gap_start += sysctl_heap_stack_gap;
89942 + else
89943 + gap_start = gap_end;
89944 + }
89945 + if (vma->vm_flags & VM_GROWSDOWN) {
89946 + if (gap_end - gap_start > sysctl_heap_stack_gap)
89947 + gap_end -= sysctl_heap_stack_gap;
89948 + else
89949 + gap_end = gap_start;
89950 + }
89951 if (gap_start <= high_limit && gap_end - gap_start >= length)
89952 goto found;
89953
89954 @@ -1855,6 +2115,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
89955 struct mm_struct *mm = current->mm;
89956 struct vm_area_struct *vma;
89957 struct vm_unmapped_area_info info;
89958 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
89959
89960 if (len > TASK_SIZE - mmap_min_addr)
89961 return -ENOMEM;
89962 @@ -1862,19 +2123,30 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
89963 if (flags & MAP_FIXED)
89964 return addr;
89965
89966 +#ifdef CONFIG_PAX_RANDMMAP
89967 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
89968 +#endif
89969 +
89970 if (addr) {
89971 addr = PAGE_ALIGN(addr);
89972 vma = find_vma(mm, addr);
89973 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
89974 - (!vma || addr + len <= vma->vm_start))
89975 + check_heap_stack_gap(vma, addr, len, offset))
89976 return addr;
89977 }
89978
89979 info.flags = 0;
89980 info.length = len;
89981 info.low_limit = TASK_UNMAPPED_BASE;
89982 +
89983 +#ifdef CONFIG_PAX_RANDMMAP
89984 + if (mm->pax_flags & MF_PAX_RANDMMAP)
89985 + info.low_limit += mm->delta_mmap;
89986 +#endif
89987 +
89988 info.high_limit = TASK_SIZE;
89989 info.align_mask = 0;
89990 + info.threadstack_offset = offset;
89991 return vm_unmapped_area(&info);
89992 }
89993 #endif
89994 @@ -1893,6 +2165,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
89995 struct mm_struct *mm = current->mm;
89996 unsigned long addr = addr0;
89997 struct vm_unmapped_area_info info;
89998 + unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
89999
90000 /* requested length too big for entire address space */
90001 if (len > TASK_SIZE - mmap_min_addr)
90002 @@ -1901,12 +2174,16 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
90003 if (flags & MAP_FIXED)
90004 return addr;
90005
90006 +#ifdef CONFIG_PAX_RANDMMAP
90007 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
90008 +#endif
90009 +
90010 /* requesting a specific address */
90011 if (addr) {
90012 addr = PAGE_ALIGN(addr);
90013 vma = find_vma(mm, addr);
90014 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
90015 - (!vma || addr + len <= vma->vm_start))
90016 + check_heap_stack_gap(vma, addr, len, offset))
90017 return addr;
90018 }
90019
90020 @@ -1915,6 +2192,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
90021 info.low_limit = max(PAGE_SIZE, mmap_min_addr);
90022 info.high_limit = mm->mmap_base;
90023 info.align_mask = 0;
90024 + info.threadstack_offset = offset;
90025 addr = vm_unmapped_area(&info);
90026
90027 /*
90028 @@ -1927,6 +2205,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
90029 VM_BUG_ON(addr != -ENOMEM);
90030 info.flags = 0;
90031 info.low_limit = TASK_UNMAPPED_BASE;
90032 +
90033 +#ifdef CONFIG_PAX_RANDMMAP
90034 + if (mm->pax_flags & MF_PAX_RANDMMAP)
90035 + info.low_limit += mm->delta_mmap;
90036 +#endif
90037 +
90038 info.high_limit = TASK_SIZE;
90039 addr = vm_unmapped_area(&info);
90040 }
90041 @@ -2028,6 +2312,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
90042 return vma;
90043 }
90044
90045 +#ifdef CONFIG_PAX_SEGMEXEC
90046 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
90047 +{
90048 + struct vm_area_struct *vma_m;
90049 +
90050 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
90051 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
90052 + BUG_ON(vma->vm_mirror);
90053 + return NULL;
90054 + }
90055 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
90056 + vma_m = vma->vm_mirror;
90057 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
90058 + BUG_ON(vma->vm_file != vma_m->vm_file);
90059 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
90060 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
90061 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
90062 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
90063 + return vma_m;
90064 +}
90065 +#endif
90066 +
90067 /*
90068 * Verify that the stack growth is acceptable and
90069 * update accounting. This is shared with both the
90070 @@ -2044,6 +2350,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
90071 return -ENOMEM;
90072
90073 /* Stack limit test */
90074 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
90075 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
90076 return -ENOMEM;
90077
90078 @@ -2054,6 +2361,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
90079 locked = mm->locked_vm + grow;
90080 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
90081 limit >>= PAGE_SHIFT;
90082 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
90083 if (locked > limit && !capable(CAP_IPC_LOCK))
90084 return -ENOMEM;
90085 }
90086 @@ -2083,37 +2391,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
90087 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
90088 * vma is the last one with address > vma->vm_end. Have to extend vma.
90089 */
90090 +#ifndef CONFIG_IA64
90091 +static
90092 +#endif
90093 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
90094 {
90095 int error;
90096 + bool locknext;
90097
90098 if (!(vma->vm_flags & VM_GROWSUP))
90099 return -EFAULT;
90100
90101 + /* Also guard against wrapping around to address 0. */
90102 + if (address < PAGE_ALIGN(address+1))
90103 + address = PAGE_ALIGN(address+1);
90104 + else
90105 + return -ENOMEM;
90106 +
90107 /*
90108 * We must make sure the anon_vma is allocated
90109 * so that the anon_vma locking is not a noop.
90110 */
90111 if (unlikely(anon_vma_prepare(vma)))
90112 return -ENOMEM;
90113 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
90114 + if (locknext && anon_vma_prepare(vma->vm_next))
90115 + return -ENOMEM;
90116 vma_lock_anon_vma(vma);
90117 + if (locknext)
90118 + vma_lock_anon_vma(vma->vm_next);
90119
90120 /*
90121 * vma->vm_start/vm_end cannot change under us because the caller
90122 * is required to hold the mmap_sem in read mode. We need the
90123 - * anon_vma lock to serialize against concurrent expand_stacks.
90124 - * Also guard against wrapping around to address 0.
90125 + * anon_vma locks to serialize against concurrent expand_stacks
90126 + * and expand_upwards.
90127 */
90128 - if (address < PAGE_ALIGN(address+4))
90129 - address = PAGE_ALIGN(address+4);
90130 - else {
90131 - vma_unlock_anon_vma(vma);
90132 - return -ENOMEM;
90133 - }
90134 error = 0;
90135
90136 /* Somebody else might have raced and expanded it already */
90137 - if (address > vma->vm_end) {
90138 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
90139 + error = -ENOMEM;
90140 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
90141 unsigned long size, grow;
90142
90143 size = address - vma->vm_start;
90144 @@ -2148,6 +2467,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
90145 }
90146 }
90147 }
90148 + if (locknext)
90149 + vma_unlock_anon_vma(vma->vm_next);
90150 vma_unlock_anon_vma(vma);
90151 khugepaged_enter_vma_merge(vma);
90152 validate_mm(vma->vm_mm);
90153 @@ -2162,6 +2483,8 @@ int expand_downwards(struct vm_area_struct *vma,
90154 unsigned long address)
90155 {
90156 int error;
90157 + bool lockprev = false;
90158 + struct vm_area_struct *prev;
90159
90160 /*
90161 * We must make sure the anon_vma is allocated
90162 @@ -2175,6 +2498,15 @@ int expand_downwards(struct vm_area_struct *vma,
90163 if (error)
90164 return error;
90165
90166 + prev = vma->vm_prev;
90167 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
90168 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
90169 +#endif
90170 + if (lockprev && anon_vma_prepare(prev))
90171 + return -ENOMEM;
90172 + if (lockprev)
90173 + vma_lock_anon_vma(prev);
90174 +
90175 vma_lock_anon_vma(vma);
90176
90177 /*
90178 @@ -2184,9 +2516,17 @@ int expand_downwards(struct vm_area_struct *vma,
90179 */
90180
90181 /* Somebody else might have raced and expanded it already */
90182 - if (address < vma->vm_start) {
90183 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
90184 + error = -ENOMEM;
90185 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
90186 unsigned long size, grow;
90187
90188 +#ifdef CONFIG_PAX_SEGMEXEC
90189 + struct vm_area_struct *vma_m;
90190 +
90191 + vma_m = pax_find_mirror_vma(vma);
90192 +#endif
90193 +
90194 size = vma->vm_end - address;
90195 grow = (vma->vm_start - address) >> PAGE_SHIFT;
90196
90197 @@ -2211,13 +2551,27 @@ int expand_downwards(struct vm_area_struct *vma,
90198 vma->vm_pgoff -= grow;
90199 anon_vma_interval_tree_post_update_vma(vma);
90200 vma_gap_update(vma);
90201 +
90202 +#ifdef CONFIG_PAX_SEGMEXEC
90203 + if (vma_m) {
90204 + anon_vma_interval_tree_pre_update_vma(vma_m);
90205 + vma_m->vm_start -= grow << PAGE_SHIFT;
90206 + vma_m->vm_pgoff -= grow;
90207 + anon_vma_interval_tree_post_update_vma(vma_m);
90208 + vma_gap_update(vma_m);
90209 + }
90210 +#endif
90211 +
90212 spin_unlock(&vma->vm_mm->page_table_lock);
90213
90214 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
90215 perf_event_mmap(vma);
90216 }
90217 }
90218 }
90219 vma_unlock_anon_vma(vma);
90220 + if (lockprev)
90221 + vma_unlock_anon_vma(prev);
90222 khugepaged_enter_vma_merge(vma);
90223 validate_mm(vma->vm_mm);
90224 return error;
90225 @@ -2315,6 +2669,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
90226 do {
90227 long nrpages = vma_pages(vma);
90228
90229 +#ifdef CONFIG_PAX_SEGMEXEC
90230 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
90231 + vma = remove_vma(vma);
90232 + continue;
90233 + }
90234 +#endif
90235 +
90236 if (vma->vm_flags & VM_ACCOUNT)
90237 nr_accounted += nrpages;
90238 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
90239 @@ -2359,6 +2720,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
90240 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
90241 vma->vm_prev = NULL;
90242 do {
90243 +
90244 +#ifdef CONFIG_PAX_SEGMEXEC
90245 + if (vma->vm_mirror) {
90246 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
90247 + vma->vm_mirror->vm_mirror = NULL;
90248 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
90249 + vma->vm_mirror = NULL;
90250 + }
90251 +#endif
90252 +
90253 vma_rb_erase(vma, &mm->mm_rb);
90254 mm->map_count--;
90255 tail_vma = vma;
90256 @@ -2384,14 +2755,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
90257 struct vm_area_struct *new;
90258 int err = -ENOMEM;
90259
90260 +#ifdef CONFIG_PAX_SEGMEXEC
90261 + struct vm_area_struct *vma_m, *new_m = NULL;
90262 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
90263 +#endif
90264 +
90265 if (is_vm_hugetlb_page(vma) && (addr &
90266 ~(huge_page_mask(hstate_vma(vma)))))
90267 return -EINVAL;
90268
90269 +#ifdef CONFIG_PAX_SEGMEXEC
90270 + vma_m = pax_find_mirror_vma(vma);
90271 +#endif
90272 +
90273 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
90274 if (!new)
90275 goto out_err;
90276
90277 +#ifdef CONFIG_PAX_SEGMEXEC
90278 + if (vma_m) {
90279 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
90280 + if (!new_m) {
90281 + kmem_cache_free(vm_area_cachep, new);
90282 + goto out_err;
90283 + }
90284 + }
90285 +#endif
90286 +
90287 /* most fields are the same, copy all, and then fixup */
90288 *new = *vma;
90289
90290 @@ -2404,6 +2794,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
90291 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
90292 }
90293
90294 +#ifdef CONFIG_PAX_SEGMEXEC
90295 + if (vma_m) {
90296 + *new_m = *vma_m;
90297 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
90298 + new_m->vm_mirror = new;
90299 + new->vm_mirror = new_m;
90300 +
90301 + if (new_below)
90302 + new_m->vm_end = addr_m;
90303 + else {
90304 + new_m->vm_start = addr_m;
90305 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
90306 + }
90307 + }
90308 +#endif
90309 +
90310 err = vma_dup_policy(vma, new);
90311 if (err)
90312 goto out_free_vma;
90313 @@ -2423,6 +2829,38 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
90314 else
90315 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
90316
90317 +#ifdef CONFIG_PAX_SEGMEXEC
90318 + if (!err && vma_m) {
90319 + struct mempolicy *pol = vma_policy(new);
90320 +
90321 + if (anon_vma_clone(new_m, vma_m))
90322 + goto out_free_mpol;
90323 +
90324 + mpol_get(pol);
90325 + set_vma_policy(new_m, pol);
90326 +
90327 + if (new_m->vm_file)
90328 + get_file(new_m->vm_file);
90329 +
90330 + if (new_m->vm_ops && new_m->vm_ops->open)
90331 + new_m->vm_ops->open(new_m);
90332 +
90333 + if (new_below)
90334 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
90335 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
90336 + else
90337 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
90338 +
90339 + if (err) {
90340 + if (new_m->vm_ops && new_m->vm_ops->close)
90341 + new_m->vm_ops->close(new_m);
90342 + if (new_m->vm_file)
90343 + fput(new_m->vm_file);
90344 + mpol_put(pol);
90345 + }
90346 + }
90347 +#endif
90348 +
90349 /* Success. */
90350 if (!err)
90351 return 0;
90352 @@ -2432,10 +2870,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
90353 new->vm_ops->close(new);
90354 if (new->vm_file)
90355 fput(new->vm_file);
90356 - unlink_anon_vmas(new);
90357 out_free_mpol:
90358 mpol_put(vma_policy(new));
90359 out_free_vma:
90360 +
90361 +#ifdef CONFIG_PAX_SEGMEXEC
90362 + if (new_m) {
90363 + unlink_anon_vmas(new_m);
90364 + kmem_cache_free(vm_area_cachep, new_m);
90365 + }
90366 +#endif
90367 +
90368 + unlink_anon_vmas(new);
90369 kmem_cache_free(vm_area_cachep, new);
90370 out_err:
90371 return err;
90372 @@ -2448,6 +2894,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
90373 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
90374 unsigned long addr, int new_below)
90375 {
90376 +
90377 +#ifdef CONFIG_PAX_SEGMEXEC
90378 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
90379 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
90380 + if (mm->map_count >= sysctl_max_map_count-1)
90381 + return -ENOMEM;
90382 + } else
90383 +#endif
90384 +
90385 if (mm->map_count >= sysctl_max_map_count)
90386 return -ENOMEM;
90387
90388 @@ -2459,11 +2914,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
90389 * work. This now handles partial unmappings.
90390 * Jeremy Fitzhardinge <jeremy@goop.org>
90391 */
90392 +#ifdef CONFIG_PAX_SEGMEXEC
90393 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
90394 {
90395 + int ret = __do_munmap(mm, start, len);
90396 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
90397 + return ret;
90398 +
90399 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
90400 +}
90401 +
90402 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
90403 +#else
90404 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
90405 +#endif
90406 +{
90407 unsigned long end;
90408 struct vm_area_struct *vma, *prev, *last;
90409
90410 + /*
90411 + * mm->mmap_sem is required to protect against another thread
90412 + * changing the mappings in case we sleep.
90413 + */
90414 + verify_mm_writelocked(mm);
90415 +
90416 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
90417 return -EINVAL;
90418
90419 @@ -2538,6 +3012,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
90420 /* Fix up all other VM information */
90421 remove_vma_list(mm, vma);
90422
90423 + track_exec_limit(mm, start, end, 0UL);
90424 +
90425 return 0;
90426 }
90427
90428 @@ -2546,6 +3022,13 @@ int vm_munmap(unsigned long start, size_t len)
90429 int ret;
90430 struct mm_struct *mm = current->mm;
90431
90432 +
90433 +#ifdef CONFIG_PAX_SEGMEXEC
90434 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
90435 + (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
90436 + return -EINVAL;
90437 +#endif
90438 +
90439 down_write(&mm->mmap_sem);
90440 ret = do_munmap(mm, start, len);
90441 up_write(&mm->mmap_sem);
90442 @@ -2559,16 +3042,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
90443 return vm_munmap(addr, len);
90444 }
90445
90446 -static inline void verify_mm_writelocked(struct mm_struct *mm)
90447 -{
90448 -#ifdef CONFIG_DEBUG_VM
90449 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
90450 - WARN_ON(1);
90451 - up_read(&mm->mmap_sem);
90452 - }
90453 -#endif
90454 -}
90455 -
90456 /*
90457 * this is really a simplified "do_mmap". it only handles
90458 * anonymous maps. eventually we may be able to do some
90459 @@ -2582,6 +3055,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
90460 struct rb_node ** rb_link, * rb_parent;
90461 pgoff_t pgoff = addr >> PAGE_SHIFT;
90462 int error;
90463 + unsigned long charged;
90464
90465 len = PAGE_ALIGN(len);
90466 if (!len)
90467 @@ -2589,16 +3063,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
90468
90469 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
90470
90471 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
90472 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
90473 + flags &= ~VM_EXEC;
90474 +
90475 +#ifdef CONFIG_PAX_MPROTECT
90476 + if (mm->pax_flags & MF_PAX_MPROTECT)
90477 + flags &= ~VM_MAYEXEC;
90478 +#endif
90479 +
90480 + }
90481 +#endif
90482 +
90483 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
90484 if (error & ~PAGE_MASK)
90485 return error;
90486
90487 + charged = len >> PAGE_SHIFT;
90488 +
90489 /*
90490 * mlock MCL_FUTURE?
90491 */
90492 if (mm->def_flags & VM_LOCKED) {
90493 unsigned long locked, lock_limit;
90494 - locked = len >> PAGE_SHIFT;
90495 + locked = charged;
90496 locked += mm->locked_vm;
90497 lock_limit = rlimit(RLIMIT_MEMLOCK);
90498 lock_limit >>= PAGE_SHIFT;
90499 @@ -2615,21 +3103,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
90500 /*
90501 * Clear old maps. this also does some error checking for us
90502 */
90503 - munmap_back:
90504 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
90505 if (do_munmap(mm, addr, len))
90506 return -ENOMEM;
90507 - goto munmap_back;
90508 + BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
90509 }
90510
90511 /* Check against address space limits *after* clearing old maps... */
90512 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
90513 + if (!may_expand_vm(mm, charged))
90514 return -ENOMEM;
90515
90516 if (mm->map_count > sysctl_max_map_count)
90517 return -ENOMEM;
90518
90519 - if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
90520 + if (security_vm_enough_memory_mm(mm, charged))
90521 return -ENOMEM;
90522
90523 /* Can we just expand an old private anonymous mapping? */
90524 @@ -2643,7 +3130,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
90525 */
90526 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
90527 if (!vma) {
90528 - vm_unacct_memory(len >> PAGE_SHIFT);
90529 + vm_unacct_memory(charged);
90530 return -ENOMEM;
90531 }
90532
90533 @@ -2657,10 +3144,11 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
90534 vma_link(mm, vma, prev, rb_link, rb_parent);
90535 out:
90536 perf_event_mmap(vma);
90537 - mm->total_vm += len >> PAGE_SHIFT;
90538 + mm->total_vm += charged;
90539 if (flags & VM_LOCKED)
90540 - mm->locked_vm += (len >> PAGE_SHIFT);
90541 + mm->locked_vm += charged;
90542 vma->vm_flags |= VM_SOFTDIRTY;
90543 + track_exec_limit(mm, addr, addr + len, flags);
90544 return addr;
90545 }
90546
90547 @@ -2722,6 +3210,7 @@ void exit_mmap(struct mm_struct *mm)
90548 while (vma) {
90549 if (vma->vm_flags & VM_ACCOUNT)
90550 nr_accounted += vma_pages(vma);
90551 + vma->vm_mirror = NULL;
90552 vma = remove_vma(vma);
90553 }
90554 vm_unacct_memory(nr_accounted);
90555 @@ -2738,6 +3227,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
90556 struct vm_area_struct *prev;
90557 struct rb_node **rb_link, *rb_parent;
90558
90559 +#ifdef CONFIG_PAX_SEGMEXEC
90560 + struct vm_area_struct *vma_m = NULL;
90561 +#endif
90562 +
90563 + if (security_mmap_addr(vma->vm_start))
90564 + return -EPERM;
90565 +
90566 /*
90567 * The vm_pgoff of a purely anonymous vma should be irrelevant
90568 * until its first write fault, when page's anon_vma and index
90569 @@ -2761,7 +3257,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
90570 security_vm_enough_memory_mm(mm, vma_pages(vma)))
90571 return -ENOMEM;
90572
90573 +#ifdef CONFIG_PAX_SEGMEXEC
90574 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
90575 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
90576 + if (!vma_m)
90577 + return -ENOMEM;
90578 + }
90579 +#endif
90580 +
90581 vma_link(mm, vma, prev, rb_link, rb_parent);
90582 +
90583 +#ifdef CONFIG_PAX_SEGMEXEC
90584 + if (vma_m)
90585 + BUG_ON(pax_mirror_vma(vma_m, vma));
90586 +#endif
90587 +
90588 return 0;
90589 }
90590
90591 @@ -2780,6 +3290,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
90592 struct rb_node **rb_link, *rb_parent;
90593 bool faulted_in_anon_vma = true;
90594
90595 + BUG_ON(vma->vm_mirror);
90596 +
90597 /*
90598 * If anonymous vma has not yet been faulted, update new pgoff
90599 * to match new location, to increase its chance of merging.
90600 @@ -2844,6 +3356,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
90601 return NULL;
90602 }
90603
90604 +#ifdef CONFIG_PAX_SEGMEXEC
90605 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
90606 +{
90607 + struct vm_area_struct *prev_m;
90608 + struct rb_node **rb_link_m, *rb_parent_m;
90609 + struct mempolicy *pol_m;
90610 +
90611 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
90612 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
90613 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
90614 + *vma_m = *vma;
90615 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
90616 + if (anon_vma_clone(vma_m, vma))
90617 + return -ENOMEM;
90618 + pol_m = vma_policy(vma_m);
90619 + mpol_get(pol_m);
90620 + set_vma_policy(vma_m, pol_m);
90621 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
90622 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
90623 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
90624 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
90625 + if (vma_m->vm_file)
90626 + get_file(vma_m->vm_file);
90627 + if (vma_m->vm_ops && vma_m->vm_ops->open)
90628 + vma_m->vm_ops->open(vma_m);
90629 + BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
90630 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
90631 + vma_m->vm_mirror = vma;
90632 + vma->vm_mirror = vma_m;
90633 + return 0;
90634 +}
90635 +#endif
90636 +
90637 /*
90638 * Return true if the calling process may expand its vm space by the passed
90639 * number of pages
90640 @@ -2855,6 +3400,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
90641
90642 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
90643
90644 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
90645 if (cur + npages > lim)
90646 return 0;
90647 return 1;
90648 @@ -2925,6 +3471,22 @@ int install_special_mapping(struct mm_struct *mm,
90649 vma->vm_start = addr;
90650 vma->vm_end = addr + len;
90651
90652 +#ifdef CONFIG_PAX_MPROTECT
90653 + if (mm->pax_flags & MF_PAX_MPROTECT) {
90654 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
90655 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
90656 + return -EPERM;
90657 + if (!(vm_flags & VM_EXEC))
90658 + vm_flags &= ~VM_MAYEXEC;
90659 +#else
90660 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
90661 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
90662 +#endif
90663 + else
90664 + vm_flags &= ~VM_MAYWRITE;
90665 + }
90666 +#endif
90667 +
90668 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
90669 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
90670
90671 diff --git a/mm/mprotect.c b/mm/mprotect.c
90672 index 6c3f56f..b2340b0 100644
90673 --- a/mm/mprotect.c
90674 +++ b/mm/mprotect.c
90675 @@ -23,10 +23,18 @@
90676 #include <linux/mmu_notifier.h>
90677 #include <linux/migrate.h>
90678 #include <linux/perf_event.h>
90679 +#include <linux/sched/sysctl.h>
90680 +
90681 +#ifdef CONFIG_PAX_MPROTECT
90682 +#include <linux/elf.h>
90683 +#include <linux/binfmts.h>
90684 +#endif
90685 +
90686 #include <asm/uaccess.h>
90687 #include <asm/pgtable.h>
90688 #include <asm/cacheflush.h>
90689 #include <asm/tlbflush.h>
90690 +#include <asm/mmu_context.h>
90691
90692 #ifndef pgprot_modify
90693 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
90694 @@ -241,6 +249,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
90695 return pages;
90696 }
90697
90698 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
90699 +/* called while holding the mmap semaphor for writing except stack expansion */
90700 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
90701 +{
90702 + unsigned long oldlimit, newlimit = 0UL;
90703 +
90704 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
90705 + return;
90706 +
90707 + spin_lock(&mm->page_table_lock);
90708 + oldlimit = mm->context.user_cs_limit;
90709 + if ((prot & VM_EXEC) && oldlimit < end)
90710 + /* USER_CS limit moved up */
90711 + newlimit = end;
90712 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
90713 + /* USER_CS limit moved down */
90714 + newlimit = start;
90715 +
90716 + if (newlimit) {
90717 + mm->context.user_cs_limit = newlimit;
90718 +
90719 +#ifdef CONFIG_SMP
90720 + wmb();
90721 + cpus_clear(mm->context.cpu_user_cs_mask);
90722 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
90723 +#endif
90724 +
90725 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
90726 + }
90727 + spin_unlock(&mm->page_table_lock);
90728 + if (newlimit == end) {
90729 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
90730 +
90731 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
90732 + if (is_vm_hugetlb_page(vma))
90733 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
90734 + else
90735 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
90736 + }
90737 +}
90738 +#endif
90739 +
90740 int
90741 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
90742 unsigned long start, unsigned long end, unsigned long newflags)
90743 @@ -253,11 +303,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
90744 int error;
90745 int dirty_accountable = 0;
90746
90747 +#ifdef CONFIG_PAX_SEGMEXEC
90748 + struct vm_area_struct *vma_m = NULL;
90749 + unsigned long start_m, end_m;
90750 +
90751 + start_m = start + SEGMEXEC_TASK_SIZE;
90752 + end_m = end + SEGMEXEC_TASK_SIZE;
90753 +#endif
90754 +
90755 if (newflags == oldflags) {
90756 *pprev = vma;
90757 return 0;
90758 }
90759
90760 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
90761 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
90762 +
90763 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
90764 + return -ENOMEM;
90765 +
90766 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
90767 + return -ENOMEM;
90768 + }
90769 +
90770 /*
90771 * If we make a private mapping writable we increase our commit;
90772 * but (without finer accounting) cannot reduce our commit if we
90773 @@ -274,6 +342,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
90774 }
90775 }
90776
90777 +#ifdef CONFIG_PAX_SEGMEXEC
90778 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
90779 + if (start != vma->vm_start) {
90780 + error = split_vma(mm, vma, start, 1);
90781 + if (error)
90782 + goto fail;
90783 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
90784 + *pprev = (*pprev)->vm_next;
90785 + }
90786 +
90787 + if (end != vma->vm_end) {
90788 + error = split_vma(mm, vma, end, 0);
90789 + if (error)
90790 + goto fail;
90791 + }
90792 +
90793 + if (pax_find_mirror_vma(vma)) {
90794 + error = __do_munmap(mm, start_m, end_m - start_m);
90795 + if (error)
90796 + goto fail;
90797 + } else {
90798 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
90799 + if (!vma_m) {
90800 + error = -ENOMEM;
90801 + goto fail;
90802 + }
90803 + vma->vm_flags = newflags;
90804 + error = pax_mirror_vma(vma_m, vma);
90805 + if (error) {
90806 + vma->vm_flags = oldflags;
90807 + goto fail;
90808 + }
90809 + }
90810 + }
90811 +#endif
90812 +
90813 /*
90814 * First try to merge with previous and/or next vma.
90815 */
90816 @@ -304,9 +408,21 @@ success:
90817 * vm_flags and vm_page_prot are protected by the mmap_sem
90818 * held in write mode.
90819 */
90820 +
90821 +#ifdef CONFIG_PAX_SEGMEXEC
90822 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
90823 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
90824 +#endif
90825 +
90826 vma->vm_flags = newflags;
90827 +
90828 +#ifdef CONFIG_PAX_MPROTECT
90829 + if (mm->binfmt && mm->binfmt->handle_mprotect)
90830 + mm->binfmt->handle_mprotect(vma, newflags);
90831 +#endif
90832 +
90833 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
90834 - vm_get_page_prot(newflags));
90835 + vm_get_page_prot(vma->vm_flags));
90836
90837 if (vma_wants_writenotify(vma)) {
90838 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
90839 @@ -345,6 +461,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
90840 end = start + len;
90841 if (end <= start)
90842 return -ENOMEM;
90843 +
90844 +#ifdef CONFIG_PAX_SEGMEXEC
90845 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
90846 + if (end > SEGMEXEC_TASK_SIZE)
90847 + return -EINVAL;
90848 + } else
90849 +#endif
90850 +
90851 + if (end > TASK_SIZE)
90852 + return -EINVAL;
90853 +
90854 if (!arch_validate_prot(prot))
90855 return -EINVAL;
90856
90857 @@ -352,7 +479,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
90858 /*
90859 * Does the application expect PROT_READ to imply PROT_EXEC:
90860 */
90861 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
90862 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
90863 prot |= PROT_EXEC;
90864
90865 vm_flags = calc_vm_prot_bits(prot);
90866 @@ -384,6 +511,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
90867 if (start > vma->vm_start)
90868 prev = vma;
90869
90870 +#ifdef CONFIG_PAX_MPROTECT
90871 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
90872 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
90873 +#endif
90874 +
90875 for (nstart = start ; ; ) {
90876 unsigned long newflags;
90877
90878 @@ -394,6 +526,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
90879
90880 /* newflags >> 4 shift VM_MAY% in place of VM_% */
90881 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
90882 + if (prot & (PROT_WRITE | PROT_EXEC))
90883 + gr_log_rwxmprotect(vma);
90884 +
90885 + error = -EACCES;
90886 + goto out;
90887 + }
90888 +
90889 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
90890 error = -EACCES;
90891 goto out;
90892 }
90893 @@ -408,6 +548,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
90894 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
90895 if (error)
90896 goto out;
90897 +
90898 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
90899 +
90900 nstart = tmp;
90901
90902 if (nstart < prev->vm_end)
90903 diff --git a/mm/mremap.c b/mm/mremap.c
90904 index 0843feb..4f5b2e6 100644
90905 --- a/mm/mremap.c
90906 +++ b/mm/mremap.c
90907 @@ -144,6 +144,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
90908 continue;
90909 pte = ptep_get_and_clear(mm, old_addr, old_pte);
90910 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
90911 +
90912 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
90913 + if (!(__supported_pte_mask & _PAGE_NX) && pte_present(pte) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
90914 + pte = pte_exprotect(pte);
90915 +#endif
90916 +
90917 pte = move_soft_dirty_pte(pte);
90918 set_pte_at(mm, new_addr, new_pte, pte);
90919 }
90920 @@ -337,6 +343,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
90921 if (is_vm_hugetlb_page(vma))
90922 goto Einval;
90923
90924 +#ifdef CONFIG_PAX_SEGMEXEC
90925 + if (pax_find_mirror_vma(vma))
90926 + goto Einval;
90927 +#endif
90928 +
90929 /* We can't remap across vm area boundaries */
90930 if (old_len > vma->vm_end - addr)
90931 goto Efault;
90932 @@ -392,20 +403,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
90933 unsigned long ret = -EINVAL;
90934 unsigned long charged = 0;
90935 unsigned long map_flags;
90936 + unsigned long pax_task_size = TASK_SIZE;
90937
90938 if (new_addr & ~PAGE_MASK)
90939 goto out;
90940
90941 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
90942 +#ifdef CONFIG_PAX_SEGMEXEC
90943 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
90944 + pax_task_size = SEGMEXEC_TASK_SIZE;
90945 +#endif
90946 +
90947 + pax_task_size -= PAGE_SIZE;
90948 +
90949 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
90950 goto out;
90951
90952 /* Check if the location we're moving into overlaps the
90953 * old location at all, and fail if it does.
90954 */
90955 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
90956 - goto out;
90957 -
90958 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
90959 + if (addr + old_len > new_addr && new_addr + new_len > addr)
90960 goto out;
90961
90962 ret = do_munmap(mm, new_addr, new_len);
90963 @@ -474,6 +490,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
90964 unsigned long ret = -EINVAL;
90965 unsigned long charged = 0;
90966 bool locked = false;
90967 + unsigned long pax_task_size = TASK_SIZE;
90968
90969 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
90970 return ret;
90971 @@ -495,6 +512,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
90972 if (!new_len)
90973 return ret;
90974
90975 +#ifdef CONFIG_PAX_SEGMEXEC
90976 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
90977 + pax_task_size = SEGMEXEC_TASK_SIZE;
90978 +#endif
90979 +
90980 + pax_task_size -= PAGE_SIZE;
90981 +
90982 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
90983 + old_len > pax_task_size || addr > pax_task_size-old_len)
90984 + return ret;
90985 +
90986 down_write(&current->mm->mmap_sem);
90987
90988 if (flags & MREMAP_FIXED) {
90989 @@ -545,6 +573,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
90990 new_addr = addr;
90991 }
90992 ret = addr;
90993 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
90994 goto out;
90995 }
90996 }
90997 @@ -568,7 +597,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
90998 goto out;
90999 }
91000
91001 + map_flags = vma->vm_flags;
91002 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
91003 + if (!(ret & ~PAGE_MASK)) {
91004 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
91005 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
91006 + }
91007 }
91008 out:
91009 if (ret & ~PAGE_MASK)
91010 diff --git a/mm/nommu.c b/mm/nommu.c
91011 index ecd1f15..77039bd 100644
91012 --- a/mm/nommu.c
91013 +++ b/mm/nommu.c
91014 @@ -64,7 +64,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
91015 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
91016 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
91017 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
91018 -int heap_stack_gap = 0;
91019
91020 atomic_long_t mmap_pages_allocated;
91021
91022 @@ -844,15 +843,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
91023 EXPORT_SYMBOL(find_vma);
91024
91025 /*
91026 - * find a VMA
91027 - * - we don't extend stack VMAs under NOMMU conditions
91028 - */
91029 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
91030 -{
91031 - return find_vma(mm, addr);
91032 -}
91033 -
91034 -/*
91035 * expand a stack to a given address
91036 * - not supported under NOMMU conditions
91037 */
91038 @@ -1563,6 +1553,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
91039
91040 /* most fields are the same, copy all, and then fixup */
91041 *new = *vma;
91042 + INIT_LIST_HEAD(&new->anon_vma_chain);
91043 *region = *vma->vm_region;
91044 new->vm_region = region;
91045
91046 @@ -1993,8 +1984,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
91047 }
91048 EXPORT_SYMBOL(generic_file_remap_pages);
91049
91050 -static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
91051 - unsigned long addr, void *buf, int len, int write)
91052 +static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
91053 + unsigned long addr, void *buf, size_t len, int write)
91054 {
91055 struct vm_area_struct *vma;
91056
91057 @@ -2035,8 +2026,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
91058 *
91059 * The caller must hold a reference on @mm.
91060 */
91061 -int access_remote_vm(struct mm_struct *mm, unsigned long addr,
91062 - void *buf, int len, int write)
91063 +ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
91064 + void *buf, size_t len, int write)
91065 {
91066 return __access_remote_vm(NULL, mm, addr, buf, len, write);
91067 }
91068 @@ -2045,7 +2036,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
91069 * Access another process' address space.
91070 * - source/target buffer must be kernel space
91071 */
91072 -int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
91073 +ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
91074 {
91075 struct mm_struct *mm;
91076
91077 diff --git a/mm/page-writeback.c b/mm/page-writeback.c
91078 index 6380758..4064aec 100644
91079 --- a/mm/page-writeback.c
91080 +++ b/mm/page-writeback.c
91081 @@ -690,7 +690,7 @@ static inline long long pos_ratio_polynom(unsigned long setpoint,
91082 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
91083 * - the bdi dirty thresh drops quickly due to change of JBOD workload
91084 */
91085 -static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
91086 +static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
91087 unsigned long thresh,
91088 unsigned long bg_thresh,
91089 unsigned long dirty,
91090 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
91091 index dd886fa..7686339 100644
91092 --- a/mm/page_alloc.c
91093 +++ b/mm/page_alloc.c
91094 @@ -61,6 +61,7 @@
91095 #include <linux/page-debug-flags.h>
91096 #include <linux/hugetlb.h>
91097 #include <linux/sched/rt.h>
91098 +#include <linux/random.h>
91099
91100 #include <asm/sections.h>
91101 #include <asm/tlbflush.h>
91102 @@ -354,7 +355,7 @@ out:
91103 * This usage means that zero-order pages may not be compound.
91104 */
91105
91106 -static void free_compound_page(struct page *page)
91107 +void free_compound_page(struct page *page)
91108 {
91109 __free_pages_ok(page, compound_order(page));
91110 }
91111 @@ -712,6 +713,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
91112 int i;
91113 int bad = 0;
91114
91115 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
91116 + unsigned long index = 1UL << order;
91117 +#endif
91118 +
91119 trace_mm_page_free(page, order);
91120 kmemcheck_free_shadow(page, order);
91121
91122 @@ -728,6 +733,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
91123 debug_check_no_obj_freed(page_address(page),
91124 PAGE_SIZE << order);
91125 }
91126 +
91127 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
91128 + for (; index; --index)
91129 + sanitize_highpage(page + index - 1);
91130 +#endif
91131 +
91132 arch_free_page(page, order);
91133 kernel_map_pages(page, 1 << order, 0);
91134
91135 @@ -750,6 +761,20 @@ static void __free_pages_ok(struct page *page, unsigned int order)
91136 local_irq_restore(flags);
91137 }
91138
91139 +#ifdef CONFIG_PAX_LATENT_ENTROPY
91140 +bool __meminitdata extra_latent_entropy;
91141 +
91142 +static int __init setup_pax_extra_latent_entropy(char *str)
91143 +{
91144 + extra_latent_entropy = true;
91145 + return 0;
91146 +}
91147 +early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
91148 +
91149 +volatile u64 latent_entropy __latent_entropy;
91150 +EXPORT_SYMBOL(latent_entropy);
91151 +#endif
91152 +
91153 void __init __free_pages_bootmem(struct page *page, unsigned int order)
91154 {
91155 unsigned int nr_pages = 1 << order;
91156 @@ -765,6 +790,19 @@ void __init __free_pages_bootmem(struct page *page, unsigned int order)
91157 __ClearPageReserved(p);
91158 set_page_count(p, 0);
91159
91160 +#ifdef CONFIG_PAX_LATENT_ENTROPY
91161 + if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
91162 + u64 hash = 0;
91163 + size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
91164 + const u64 *data = lowmem_page_address(page);
91165 +
91166 + for (index = 0; index < end; index++)
91167 + hash ^= hash + data[index];
91168 + latent_entropy ^= hash;
91169 + add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
91170 + }
91171 +#endif
91172 +
91173 page_zone(page)->managed_pages += nr_pages;
91174 set_page_refcounted(page);
91175 __free_pages(page, order);
91176 @@ -870,8 +908,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
91177 arch_alloc_page(page, order);
91178 kernel_map_pages(page, 1 << order, 1);
91179
91180 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
91181 if (gfp_flags & __GFP_ZERO)
91182 prep_zero_page(page, order, gfp_flags);
91183 +#endif
91184
91185 if (order && (gfp_flags & __GFP_COMP))
91186 prep_compound_page(page, order);
91187 diff --git a/mm/page_io.c b/mm/page_io.c
91188 index 8c79a47..a689e0d 100644
91189 --- a/mm/page_io.c
91190 +++ b/mm/page_io.c
91191 @@ -260,7 +260,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
91192 struct file *swap_file = sis->swap_file;
91193 struct address_space *mapping = swap_file->f_mapping;
91194 struct iovec iov = {
91195 - .iov_base = kmap(page),
91196 + .iov_base = (void __force_user *)kmap(page),
91197 .iov_len = PAGE_SIZE,
91198 };
91199
91200 diff --git a/mm/percpu.c b/mm/percpu.c
91201 index 8c8e08f..73a5cda 100644
91202 --- a/mm/percpu.c
91203 +++ b/mm/percpu.c
91204 @@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
91205 static unsigned int pcpu_high_unit_cpu __read_mostly;
91206
91207 /* the address of the first chunk which starts with the kernel static area */
91208 -void *pcpu_base_addr __read_mostly;
91209 +void *pcpu_base_addr __read_only;
91210 EXPORT_SYMBOL_GPL(pcpu_base_addr);
91211
91212 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
91213 diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
91214 index fd26d04..0cea1b0 100644
91215 --- a/mm/process_vm_access.c
91216 +++ b/mm/process_vm_access.c
91217 @@ -13,6 +13,7 @@
91218 #include <linux/uio.h>
91219 #include <linux/sched.h>
91220 #include <linux/highmem.h>
91221 +#include <linux/security.h>
91222 #include <linux/ptrace.h>
91223 #include <linux/slab.h>
91224 #include <linux/syscalls.h>
91225 @@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
91226 size_t iov_l_curr_offset = 0;
91227 ssize_t iov_len;
91228
91229 + return -ENOSYS; // PaX: until properly audited
91230 +
91231 /*
91232 * Work out how many pages of struct pages we're going to need
91233 * when eventually calling get_user_pages
91234 */
91235 for (i = 0; i < riovcnt; i++) {
91236 iov_len = rvec[i].iov_len;
91237 - if (iov_len > 0) {
91238 - nr_pages_iov = ((unsigned long)rvec[i].iov_base
91239 - + iov_len)
91240 - / PAGE_SIZE - (unsigned long)rvec[i].iov_base
91241 - / PAGE_SIZE + 1;
91242 - nr_pages = max(nr_pages, nr_pages_iov);
91243 - }
91244 + if (iov_len <= 0)
91245 + continue;
91246 + nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
91247 + (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
91248 + nr_pages = max(nr_pages, nr_pages_iov);
91249 }
91250
91251 if (nr_pages == 0)
91252 @@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
91253 goto free_proc_pages;
91254 }
91255
91256 + if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
91257 + rc = -EPERM;
91258 + goto put_task_struct;
91259 + }
91260 +
91261 mm = mm_access(task, PTRACE_MODE_ATTACH);
91262 if (!mm || IS_ERR(mm)) {
91263 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
91264 diff --git a/mm/rmap.c b/mm/rmap.c
91265 index fd3ee7a..e4baa1f 100644
91266 --- a/mm/rmap.c
91267 +++ b/mm/rmap.c
91268 @@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
91269 struct anon_vma *anon_vma = vma->anon_vma;
91270 struct anon_vma_chain *avc;
91271
91272 +#ifdef CONFIG_PAX_SEGMEXEC
91273 + struct anon_vma_chain *avc_m = NULL;
91274 +#endif
91275 +
91276 might_sleep();
91277 if (unlikely(!anon_vma)) {
91278 struct mm_struct *mm = vma->vm_mm;
91279 @@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
91280 if (!avc)
91281 goto out_enomem;
91282
91283 +#ifdef CONFIG_PAX_SEGMEXEC
91284 + avc_m = anon_vma_chain_alloc(GFP_KERNEL);
91285 + if (!avc_m)
91286 + goto out_enomem_free_avc;
91287 +#endif
91288 +
91289 anon_vma = find_mergeable_anon_vma(vma);
91290 allocated = NULL;
91291 if (!anon_vma) {
91292 @@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
91293 /* page_table_lock to protect against threads */
91294 spin_lock(&mm->page_table_lock);
91295 if (likely(!vma->anon_vma)) {
91296 +
91297 +#ifdef CONFIG_PAX_SEGMEXEC
91298 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
91299 +
91300 + if (vma_m) {
91301 + BUG_ON(vma_m->anon_vma);
91302 + vma_m->anon_vma = anon_vma;
91303 + anon_vma_chain_link(vma_m, avc_m, anon_vma);
91304 + avc_m = NULL;
91305 + }
91306 +#endif
91307 +
91308 vma->anon_vma = anon_vma;
91309 anon_vma_chain_link(vma, avc, anon_vma);
91310 allocated = NULL;
91311 @@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
91312
91313 if (unlikely(allocated))
91314 put_anon_vma(allocated);
91315 +
91316 +#ifdef CONFIG_PAX_SEGMEXEC
91317 + if (unlikely(avc_m))
91318 + anon_vma_chain_free(avc_m);
91319 +#endif
91320 +
91321 if (unlikely(avc))
91322 anon_vma_chain_free(avc);
91323 }
91324 return 0;
91325
91326 out_enomem_free_avc:
91327 +
91328 +#ifdef CONFIG_PAX_SEGMEXEC
91329 + if (avc_m)
91330 + anon_vma_chain_free(avc_m);
91331 +#endif
91332 +
91333 anon_vma_chain_free(avc);
91334 out_enomem:
91335 return -ENOMEM;
91336 @@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
91337 * Attach the anon_vmas from src to dst.
91338 * Returns 0 on success, -ENOMEM on failure.
91339 */
91340 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
91341 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
91342 {
91343 struct anon_vma_chain *avc, *pavc;
91344 struct anon_vma *root = NULL;
91345 @@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
91346 * the corresponding VMA in the parent process is attached to.
91347 * Returns 0 on success, non-zero on failure.
91348 */
91349 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
91350 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
91351 {
91352 struct anon_vma_chain *avc;
91353 struct anon_vma *anon_vma;
91354 @@ -373,8 +407,10 @@ static void anon_vma_ctor(void *data)
91355 void __init anon_vma_init(void)
91356 {
91357 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
91358 - 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
91359 - anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
91360 + 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
91361 + anon_vma_ctor);
91362 + anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
91363 + SLAB_PANIC|SLAB_NO_SANITIZE);
91364 }
91365
91366 /*
91367 @@ -600,7 +636,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
91368 spinlock_t *ptl;
91369
91370 if (unlikely(PageHuge(page))) {
91371 + /* when pud is not present, pte will be NULL */
91372 pte = huge_pte_offset(mm, address);
91373 + if (!pte)
91374 + return NULL;
91375 +
91376 ptl = &mm->page_table_lock;
91377 goto check;
91378 }
91379 diff --git a/mm/shmem.c b/mm/shmem.c
91380 index 8297623..6b9dfe9 100644
91381 --- a/mm/shmem.c
91382 +++ b/mm/shmem.c
91383 @@ -33,7 +33,7 @@
91384 #include <linux/swap.h>
91385 #include <linux/aio.h>
91386
91387 -static struct vfsmount *shm_mnt;
91388 +struct vfsmount *shm_mnt;
91389
91390 #ifdef CONFIG_SHMEM
91391 /*
91392 @@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
91393 #define BOGO_DIRENT_SIZE 20
91394
91395 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
91396 -#define SHORT_SYMLINK_LEN 128
91397 +#define SHORT_SYMLINK_LEN 64
91398
91399 /*
91400 * shmem_fallocate and shmem_writepage communicate via inode->i_private
91401 @@ -2232,6 +2232,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
91402 static int shmem_xattr_validate(const char *name)
91403 {
91404 struct { const char *prefix; size_t len; } arr[] = {
91405 +
91406 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
91407 + { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
91408 +#endif
91409 +
91410 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
91411 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
91412 };
91413 @@ -2287,6 +2292,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
91414 if (err)
91415 return err;
91416
91417 +#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
91418 + if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
91419 + if (strcmp(name, XATTR_NAME_PAX_FLAGS))
91420 + return -EOPNOTSUPP;
91421 + if (size > 8)
91422 + return -EINVAL;
91423 + }
91424 +#endif
91425 +
91426 return simple_xattr_set(&info->xattrs, name, value, size, flags);
91427 }
91428
91429 @@ -2599,8 +2613,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
91430 int err = -ENOMEM;
91431
91432 /* Round up to L1_CACHE_BYTES to resist false sharing */
91433 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
91434 - L1_CACHE_BYTES), GFP_KERNEL);
91435 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
91436 if (!sbinfo)
91437 return -ENOMEM;
91438
91439 diff --git a/mm/slab.c b/mm/slab.c
91440 index 2580db0..0523956 100644
91441 --- a/mm/slab.c
91442 +++ b/mm/slab.c
91443 @@ -366,10 +366,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
91444 if ((x)->max_freeable < i) \
91445 (x)->max_freeable = i; \
91446 } while (0)
91447 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
91448 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
91449 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
91450 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
91451 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
91452 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
91453 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
91454 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
91455 +#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
91456 +#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
91457 #else
91458 #define STATS_INC_ACTIVE(x) do { } while (0)
91459 #define STATS_DEC_ACTIVE(x) do { } while (0)
91460 @@ -386,6 +388,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
91461 #define STATS_INC_ALLOCMISS(x) do { } while (0)
91462 #define STATS_INC_FREEHIT(x) do { } while (0)
91463 #define STATS_INC_FREEMISS(x) do { } while (0)
91464 +#define STATS_INC_SANITIZED(x) do { } while (0)
91465 +#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
91466 #endif
91467
91468 #if DEBUG
91469 @@ -477,7 +481,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
91470 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
91471 */
91472 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
91473 - const struct slab *slab, void *obj)
91474 + const struct slab *slab, const void *obj)
91475 {
91476 u32 offset = (obj - slab->s_mem);
91477 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
91478 @@ -1571,12 +1575,12 @@ void __init kmem_cache_init(void)
91479 */
91480
91481 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
91482 - kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
91483 + kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
91484
91485 if (INDEX_AC != INDEX_NODE)
91486 kmalloc_caches[INDEX_NODE] =
91487 create_kmalloc_cache("kmalloc-node",
91488 - kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
91489 + kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
91490
91491 slab_early_init = 0;
91492
91493 @@ -3577,6 +3581,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
91494 struct array_cache *ac = cpu_cache_get(cachep);
91495
91496 check_irq_off();
91497 +
91498 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
91499 + if (pax_sanitize_slab) {
91500 + if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
91501 + memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
91502 +
91503 + if (cachep->ctor)
91504 + cachep->ctor(objp);
91505 +
91506 + STATS_INC_SANITIZED(cachep);
91507 + } else
91508 + STATS_INC_NOT_SANITIZED(cachep);
91509 + }
91510 +#endif
91511 +
91512 kmemleak_free_recursive(objp, cachep->flags);
91513 objp = cache_free_debugcheck(cachep, objp, caller);
91514
91515 @@ -3805,6 +3824,7 @@ void kfree(const void *objp)
91516
91517 if (unlikely(ZERO_OR_NULL_PTR(objp)))
91518 return;
91519 + VM_BUG_ON(!virt_addr_valid(objp));
91520 local_irq_save(flags);
91521 kfree_debugcheck(objp);
91522 c = virt_to_cache(objp);
91523 @@ -4246,14 +4266,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
91524 }
91525 /* cpu stats */
91526 {
91527 - unsigned long allochit = atomic_read(&cachep->allochit);
91528 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
91529 - unsigned long freehit = atomic_read(&cachep->freehit);
91530 - unsigned long freemiss = atomic_read(&cachep->freemiss);
91531 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
91532 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
91533 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
91534 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
91535
91536 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
91537 allochit, allocmiss, freehit, freemiss);
91538 }
91539 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
91540 + {
91541 + unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
91542 + unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
91543 +
91544 + seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
91545 + }
91546 +#endif
91547 #endif
91548 }
91549
91550 @@ -4471,13 +4499,71 @@ static const struct file_operations proc_slabstats_operations = {
91551 static int __init slab_proc_init(void)
91552 {
91553 #ifdef CONFIG_DEBUG_SLAB_LEAK
91554 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
91555 + proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
91556 #endif
91557 return 0;
91558 }
91559 module_init(slab_proc_init);
91560 #endif
91561
91562 +bool is_usercopy_object(const void *ptr)
91563 +{
91564 + struct page *page;
91565 + struct kmem_cache *cachep;
91566 +
91567 + if (ZERO_OR_NULL_PTR(ptr))
91568 + return false;
91569 +
91570 + if (!slab_is_available())
91571 + return false;
91572 +
91573 + if (!virt_addr_valid(ptr))
91574 + return false;
91575 +
91576 + page = virt_to_head_page(ptr);
91577 +
91578 + if (!PageSlab(page))
91579 + return false;
91580 +
91581 + cachep = page->slab_cache;
91582 + return cachep->flags & SLAB_USERCOPY;
91583 +}
91584 +
91585 +#ifdef CONFIG_PAX_USERCOPY
91586 +const char *check_heap_object(const void *ptr, unsigned long n)
91587 +{
91588 + struct page *page;
91589 + struct kmem_cache *cachep;
91590 + struct slab *slabp;
91591 + unsigned int objnr;
91592 + unsigned long offset;
91593 +
91594 + if (ZERO_OR_NULL_PTR(ptr))
91595 + return "<null>";
91596 +
91597 + if (!virt_addr_valid(ptr))
91598 + return NULL;
91599 +
91600 + page = virt_to_head_page(ptr);
91601 +
91602 + if (!PageSlab(page))
91603 + return NULL;
91604 +
91605 + cachep = page->slab_cache;
91606 + if (!(cachep->flags & SLAB_USERCOPY))
91607 + return cachep->name;
91608 +
91609 + slabp = page->slab_page;
91610 + objnr = obj_to_index(cachep, slabp, ptr);
91611 + BUG_ON(objnr >= cachep->num);
91612 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
91613 + if (offset <= cachep->object_size && n <= cachep->object_size - offset)
91614 + return NULL;
91615 +
91616 + return cachep->name;
91617 +}
91618 +#endif
91619 +
91620 /**
91621 * ksize - get the actual amount of memory allocated for a given object
91622 * @objp: Pointer to the object
91623 diff --git a/mm/slab.h b/mm/slab.h
91624 index a535033..2f98fe5 100644
91625 --- a/mm/slab.h
91626 +++ b/mm/slab.h
91627 @@ -32,6 +32,15 @@ extern struct list_head slab_caches;
91628 /* The slab cache that manages slab cache information */
91629 extern struct kmem_cache *kmem_cache;
91630
91631 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
91632 +#ifdef CONFIG_X86_64
91633 +#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
91634 +#else
91635 +#define PAX_MEMORY_SANITIZE_VALUE '\xff'
91636 +#endif
91637 +extern bool pax_sanitize_slab;
91638 +#endif
91639 +
91640 unsigned long calculate_alignment(unsigned long flags,
91641 unsigned long align, unsigned long size);
91642
91643 @@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
91644
91645 /* Legal flag mask for kmem_cache_create(), for various configurations */
91646 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
91647 - SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
91648 + SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
91649 + SLAB_USERCOPY | SLAB_NO_SANITIZE)
91650
91651 #if defined(CONFIG_DEBUG_SLAB)
91652 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
91653 @@ -231,6 +241,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
91654 return s;
91655
91656 page = virt_to_head_page(x);
91657 +
91658 + BUG_ON(!PageSlab(page));
91659 +
91660 cachep = page->slab_cache;
91661 if (slab_equal_or_root(cachep, s))
91662 return cachep;
91663 diff --git a/mm/slab_common.c b/mm/slab_common.c
91664 index e2e98af..3b1a163 100644
91665 --- a/mm/slab_common.c
91666 +++ b/mm/slab_common.c
91667 @@ -23,11 +23,22 @@
91668
91669 #include "slab.h"
91670
91671 -enum slab_state slab_state;
91672 +enum slab_state slab_state __read_only;
91673 LIST_HEAD(slab_caches);
91674 DEFINE_MUTEX(slab_mutex);
91675 struct kmem_cache *kmem_cache;
91676
91677 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
91678 +bool pax_sanitize_slab __read_only = true;
91679 +static int __init pax_sanitize_slab_setup(char *str)
91680 +{
91681 + pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
91682 + printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
91683 + return 1;
91684 +}
91685 +__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
91686 +#endif
91687 +
91688 #ifdef CONFIG_DEBUG_VM
91689 static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
91690 size_t size)
91691 @@ -212,7 +223,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
91692
91693 err = __kmem_cache_create(s, flags);
91694 if (!err) {
91695 - s->refcount = 1;
91696 + atomic_set(&s->refcount, 1);
91697 list_add(&s->list, &slab_caches);
91698 memcg_cache_list_add(memcg, s);
91699 } else {
91700 @@ -258,8 +269,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
91701
91702 get_online_cpus();
91703 mutex_lock(&slab_mutex);
91704 - s->refcount--;
91705 - if (!s->refcount) {
91706 + if (atomic_dec_and_test(&s->refcount)) {
91707 list_del(&s->list);
91708
91709 if (!__kmem_cache_shutdown(s)) {
91710 @@ -305,7 +315,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
91711 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
91712 name, size, err);
91713
91714 - s->refcount = -1; /* Exempt from merging for now */
91715 + atomic_set(&s->refcount, -1); /* Exempt from merging for now */
91716 }
91717
91718 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
91719 @@ -318,7 +328,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
91720
91721 create_boot_cache(s, name, size, flags);
91722 list_add(&s->list, &slab_caches);
91723 - s->refcount = 1;
91724 + atomic_set(&s->refcount, 1);
91725 return s;
91726 }
91727
91728 @@ -330,6 +340,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
91729 EXPORT_SYMBOL(kmalloc_dma_caches);
91730 #endif
91731
91732 +#ifdef CONFIG_PAX_USERCOPY_SLABS
91733 +struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
91734 +EXPORT_SYMBOL(kmalloc_usercopy_caches);
91735 +#endif
91736 +
91737 /*
91738 * Conversion table for small slabs sizes / 8 to the index in the
91739 * kmalloc array. This is necessary for slabs < 192 since we have non power
91740 @@ -394,6 +409,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
91741 return kmalloc_dma_caches[index];
91742
91743 #endif
91744 +
91745 +#ifdef CONFIG_PAX_USERCOPY_SLABS
91746 + if (unlikely((flags & GFP_USERCOPY)))
91747 + return kmalloc_usercopy_caches[index];
91748 +
91749 +#endif
91750 +
91751 return kmalloc_caches[index];
91752 }
91753
91754 @@ -450,7 +472,7 @@ void __init create_kmalloc_caches(unsigned long flags)
91755 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
91756 if (!kmalloc_caches[i]) {
91757 kmalloc_caches[i] = create_kmalloc_cache(NULL,
91758 - 1 << i, flags);
91759 + 1 << i, SLAB_USERCOPY | flags);
91760 }
91761
91762 /*
91763 @@ -459,10 +481,10 @@ void __init create_kmalloc_caches(unsigned long flags)
91764 * earlier power of two caches
91765 */
91766 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
91767 - kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
91768 + kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
91769
91770 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
91771 - kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
91772 + kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
91773 }
91774
91775 /* Kmalloc array is now usable */
91776 @@ -495,6 +517,23 @@ void __init create_kmalloc_caches(unsigned long flags)
91777 }
91778 }
91779 #endif
91780 +
91781 +#ifdef CONFIG_PAX_USERCOPY_SLABS
91782 + for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
91783 + struct kmem_cache *s = kmalloc_caches[i];
91784 +
91785 + if (s) {
91786 + int size = kmalloc_size(i);
91787 + char *n = kasprintf(GFP_NOWAIT,
91788 + "usercopy-kmalloc-%d", size);
91789 +
91790 + BUG_ON(!n);
91791 + kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
91792 + size, SLAB_USERCOPY | flags);
91793 + }
91794 + }
91795 +#endif
91796 +
91797 }
91798 #endif /* !CONFIG_SLOB */
91799
91800 @@ -535,6 +574,9 @@ void print_slabinfo_header(struct seq_file *m)
91801 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
91802 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
91803 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
91804 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
91805 + seq_puts(m, " : pax <sanitized> <not_sanitized>");
91806 +#endif
91807 #endif
91808 seq_putc(m, '\n');
91809 }
91810 diff --git a/mm/slob.c b/mm/slob.c
91811 index 4bf8809..98a6914 100644
91812 --- a/mm/slob.c
91813 +++ b/mm/slob.c
91814 @@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
91815 /*
91816 * Return the size of a slob block.
91817 */
91818 -static slobidx_t slob_units(slob_t *s)
91819 +static slobidx_t slob_units(const slob_t *s)
91820 {
91821 if (s->units > 0)
91822 return s->units;
91823 @@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
91824 /*
91825 * Return the next free slob block pointer after this one.
91826 */
91827 -static slob_t *slob_next(slob_t *s)
91828 +static slob_t *slob_next(const slob_t *s)
91829 {
91830 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
91831 slobidx_t next;
91832 @@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
91833 /*
91834 * Returns true if s is the last free block in its page.
91835 */
91836 -static int slob_last(slob_t *s)
91837 +static int slob_last(const slob_t *s)
91838 {
91839 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
91840 }
91841
91842 -static void *slob_new_pages(gfp_t gfp, int order, int node)
91843 +static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
91844 {
91845 - void *page;
91846 + struct page *page;
91847
91848 #ifdef CONFIG_NUMA
91849 if (node != NUMA_NO_NODE)
91850 @@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
91851 if (!page)
91852 return NULL;
91853
91854 - return page_address(page);
91855 + __SetPageSlab(page);
91856 + return page;
91857 }
91858
91859 -static void slob_free_pages(void *b, int order)
91860 +static void slob_free_pages(struct page *sp, int order)
91861 {
91862 if (current->reclaim_state)
91863 current->reclaim_state->reclaimed_slab += 1 << order;
91864 - free_pages((unsigned long)b, order);
91865 + __ClearPageSlab(sp);
91866 + page_mapcount_reset(sp);
91867 + sp->private = 0;
91868 + __free_pages(sp, order);
91869 }
91870
91871 /*
91872 @@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
91873
91874 /* Not enough space: must allocate a new page */
91875 if (!b) {
91876 - b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
91877 - if (!b)
91878 + sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
91879 + if (!sp)
91880 return NULL;
91881 - sp = virt_to_page(b);
91882 - __SetPageSlab(sp);
91883 + b = page_address(sp);
91884
91885 spin_lock_irqsave(&slob_lock, flags);
91886 sp->units = SLOB_UNITS(PAGE_SIZE);
91887 sp->freelist = b;
91888 + sp->private = 0;
91889 INIT_LIST_HEAD(&sp->list);
91890 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
91891 set_slob_page_free(sp, slob_list);
91892 @@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
91893 if (slob_page_free(sp))
91894 clear_slob_page_free(sp);
91895 spin_unlock_irqrestore(&slob_lock, flags);
91896 - __ClearPageSlab(sp);
91897 - page_mapcount_reset(sp);
91898 - slob_free_pages(b, 0);
91899 + slob_free_pages(sp, 0);
91900 return;
91901 }
91902
91903 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
91904 + if (pax_sanitize_slab)
91905 + memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
91906 +#endif
91907 +
91908 if (!slob_page_free(sp)) {
91909 /* This slob page is about to become partially free. Easy! */
91910 sp->units = units;
91911 @@ -424,11 +431,10 @@ out:
91912 */
91913
91914 static __always_inline void *
91915 -__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
91916 +__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
91917 {
91918 - unsigned int *m;
91919 - int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
91920 - void *ret;
91921 + slob_t *m;
91922 + void *ret = NULL;
91923
91924 gfp &= gfp_allowed_mask;
91925
91926 @@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
91927
91928 if (!m)
91929 return NULL;
91930 - *m = size;
91931 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
91932 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
91933 + m[0].units = size;
91934 + m[1].units = align;
91935 ret = (void *)m + align;
91936
91937 trace_kmalloc_node(caller, ret,
91938 size, size + align, gfp, node);
91939 } else {
91940 unsigned int order = get_order(size);
91941 + struct page *page;
91942
91943 if (likely(order))
91944 gfp |= __GFP_COMP;
91945 - ret = slob_new_pages(gfp, order, node);
91946 + page = slob_new_pages(gfp, order, node);
91947 + if (page) {
91948 + ret = page_address(page);
91949 + page->private = size;
91950 + }
91951
91952 trace_kmalloc_node(caller, ret,
91953 size, PAGE_SIZE << order, gfp, node);
91954 }
91955
91956 - kmemleak_alloc(ret, size, 1, gfp);
91957 + return ret;
91958 +}
91959 +
91960 +static __always_inline void *
91961 +__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
91962 +{
91963 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
91964 + void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
91965 +
91966 + if (!ZERO_OR_NULL_PTR(ret))
91967 + kmemleak_alloc(ret, size, 1, gfp);
91968 return ret;
91969 }
91970
91971 @@ -493,34 +517,112 @@ void kfree(const void *block)
91972 return;
91973 kmemleak_free(block);
91974
91975 + VM_BUG_ON(!virt_addr_valid(block));
91976 sp = virt_to_page(block);
91977 - if (PageSlab(sp)) {
91978 + VM_BUG_ON(!PageSlab(sp));
91979 + if (!sp->private) {
91980 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
91981 - unsigned int *m = (unsigned int *)(block - align);
91982 - slob_free(m, *m + align);
91983 - } else
91984 + slob_t *m = (slob_t *)(block - align);
91985 + slob_free(m, m[0].units + align);
91986 + } else {
91987 + __ClearPageSlab(sp);
91988 + page_mapcount_reset(sp);
91989 + sp->private = 0;
91990 __free_pages(sp, compound_order(sp));
91991 + }
91992 }
91993 EXPORT_SYMBOL(kfree);
91994
91995 +bool is_usercopy_object(const void *ptr)
91996 +{
91997 + if (!slab_is_available())
91998 + return false;
91999 +
92000 + // PAX: TODO
92001 +
92002 + return false;
92003 +}
92004 +
92005 +#ifdef CONFIG_PAX_USERCOPY
92006 +const char *check_heap_object(const void *ptr, unsigned long n)
92007 +{
92008 + struct page *page;
92009 + const slob_t *free;
92010 + const void *base;
92011 + unsigned long flags;
92012 +
92013 + if (ZERO_OR_NULL_PTR(ptr))
92014 + return "<null>";
92015 +
92016 + if (!virt_addr_valid(ptr))
92017 + return NULL;
92018 +
92019 + page = virt_to_head_page(ptr);
92020 + if (!PageSlab(page))
92021 + return NULL;
92022 +
92023 + if (page->private) {
92024 + base = page;
92025 + if (base <= ptr && n <= page->private - (ptr - base))
92026 + return NULL;
92027 + return "<slob>";
92028 + }
92029 +
92030 + /* some tricky double walking to find the chunk */
92031 + spin_lock_irqsave(&slob_lock, flags);
92032 + base = (void *)((unsigned long)ptr & PAGE_MASK);
92033 + free = page->freelist;
92034 +
92035 + while (!slob_last(free) && (void *)free <= ptr) {
92036 + base = free + slob_units(free);
92037 + free = slob_next(free);
92038 + }
92039 +
92040 + while (base < (void *)free) {
92041 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
92042 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
92043 + int offset;
92044 +
92045 + if (ptr < base + align)
92046 + break;
92047 +
92048 + offset = ptr - base - align;
92049 + if (offset >= m) {
92050 + base += size;
92051 + continue;
92052 + }
92053 +
92054 + if (n > m - offset)
92055 + break;
92056 +
92057 + spin_unlock_irqrestore(&slob_lock, flags);
92058 + return NULL;
92059 + }
92060 +
92061 + spin_unlock_irqrestore(&slob_lock, flags);
92062 + return "<slob>";
92063 +}
92064 +#endif
92065 +
92066 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
92067 size_t ksize(const void *block)
92068 {
92069 struct page *sp;
92070 int align;
92071 - unsigned int *m;
92072 + slob_t *m;
92073
92074 BUG_ON(!block);
92075 if (unlikely(block == ZERO_SIZE_PTR))
92076 return 0;
92077
92078 sp = virt_to_page(block);
92079 - if (unlikely(!PageSlab(sp)))
92080 - return PAGE_SIZE << compound_order(sp);
92081 + VM_BUG_ON(!PageSlab(sp));
92082 + if (sp->private)
92083 + return sp->private;
92084
92085 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
92086 - m = (unsigned int *)(block - align);
92087 - return SLOB_UNITS(*m) * SLOB_UNIT;
92088 + m = (slob_t *)(block - align);
92089 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
92090 }
92091 EXPORT_SYMBOL(ksize);
92092
92093 @@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
92094
92095 void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
92096 {
92097 - void *b;
92098 + void *b = NULL;
92099
92100 flags &= gfp_allowed_mask;
92101
92102 lockdep_trace_alloc(flags);
92103
92104 +#ifdef CONFIG_PAX_USERCOPY_SLABS
92105 + b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
92106 +#else
92107 if (c->size < PAGE_SIZE) {
92108 b = slob_alloc(c->size, flags, c->align, node);
92109 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
92110 SLOB_UNITS(c->size) * SLOB_UNIT,
92111 flags, node);
92112 } else {
92113 - b = slob_new_pages(flags, get_order(c->size), node);
92114 + struct page *sp;
92115 +
92116 + sp = slob_new_pages(flags, get_order(c->size), node);
92117 + if (sp) {
92118 + b = page_address(sp);
92119 + sp->private = c->size;
92120 + }
92121 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
92122 PAGE_SIZE << get_order(c->size),
92123 flags, node);
92124 }
92125 +#endif
92126
92127 if (b && c->ctor)
92128 c->ctor(b);
92129 @@ -584,10 +696,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
92130
92131 static void __kmem_cache_free(void *b, int size)
92132 {
92133 - if (size < PAGE_SIZE)
92134 + struct page *sp;
92135 +
92136 + sp = virt_to_page(b);
92137 + BUG_ON(!PageSlab(sp));
92138 + if (!sp->private)
92139 slob_free(b, size);
92140 else
92141 - slob_free_pages(b, get_order(size));
92142 + slob_free_pages(sp, get_order(size));
92143 }
92144
92145 static void kmem_rcu_free(struct rcu_head *head)
92146 @@ -600,17 +716,31 @@ static void kmem_rcu_free(struct rcu_head *head)
92147
92148 void kmem_cache_free(struct kmem_cache *c, void *b)
92149 {
92150 + int size = c->size;
92151 +
92152 +#ifdef CONFIG_PAX_USERCOPY_SLABS
92153 + if (size + c->align < PAGE_SIZE) {
92154 + size += c->align;
92155 + b -= c->align;
92156 + }
92157 +#endif
92158 +
92159 kmemleak_free_recursive(b, c->flags);
92160 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
92161 struct slob_rcu *slob_rcu;
92162 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
92163 - slob_rcu->size = c->size;
92164 + slob_rcu = b + (size - sizeof(struct slob_rcu));
92165 + slob_rcu->size = size;
92166 call_rcu(&slob_rcu->head, kmem_rcu_free);
92167 } else {
92168 - __kmem_cache_free(b, c->size);
92169 + __kmem_cache_free(b, size);
92170 }
92171
92172 +#ifdef CONFIG_PAX_USERCOPY_SLABS
92173 + trace_kfree(_RET_IP_, b);
92174 +#else
92175 trace_kmem_cache_free(_RET_IP_, b);
92176 +#endif
92177 +
92178 }
92179 EXPORT_SYMBOL(kmem_cache_free);
92180
92181 diff --git a/mm/slub.c b/mm/slub.c
92182 index 96f2169..9111a59 100644
92183 --- a/mm/slub.c
92184 +++ b/mm/slub.c
92185 @@ -207,7 +207,7 @@ struct track {
92186
92187 enum track_item { TRACK_ALLOC, TRACK_FREE };
92188
92189 -#ifdef CONFIG_SYSFS
92190 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
92191 static int sysfs_slab_add(struct kmem_cache *);
92192 static int sysfs_slab_alias(struct kmem_cache *, const char *);
92193 static void sysfs_slab_remove(struct kmem_cache *);
92194 @@ -530,7 +530,7 @@ static void print_track(const char *s, struct track *t)
92195 if (!t->addr)
92196 return;
92197
92198 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
92199 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
92200 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
92201 #ifdef CONFIG_STACKTRACE
92202 {
92203 @@ -2616,6 +2616,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
92204
92205 slab_free_hook(s, x);
92206
92207 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
92208 + if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
92209 + memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
92210 + if (s->ctor)
92211 + s->ctor(x);
92212 + }
92213 +#endif
92214 +
92215 redo:
92216 /*
92217 * Determine the currently cpus per cpu slab.
92218 @@ -2683,7 +2691,7 @@ static int slub_min_objects;
92219 * Merge control. If this is set then no merging of slab caches will occur.
92220 * (Could be removed. This was introduced to pacify the merge skeptics.)
92221 */
92222 -static int slub_nomerge;
92223 +static int slub_nomerge = 1;
92224
92225 /*
92226 * Calculate the order of allocation given an slab object size.
92227 @@ -2960,6 +2968,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
92228 s->inuse = size;
92229
92230 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
92231 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
92232 + (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
92233 +#endif
92234 s->ctor)) {
92235 /*
92236 * Relocate free pointer after the object if it is not
92237 @@ -3305,6 +3316,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
92238 EXPORT_SYMBOL(__kmalloc_node);
92239 #endif
92240
92241 +bool is_usercopy_object(const void *ptr)
92242 +{
92243 + struct page *page;
92244 + struct kmem_cache *s;
92245 +
92246 + if (ZERO_OR_NULL_PTR(ptr))
92247 + return false;
92248 +
92249 + if (!slab_is_available())
92250 + return false;
92251 +
92252 + if (!virt_addr_valid(ptr))
92253 + return false;
92254 +
92255 + page = virt_to_head_page(ptr);
92256 +
92257 + if (!PageSlab(page))
92258 + return false;
92259 +
92260 + s = page->slab_cache;
92261 + return s->flags & SLAB_USERCOPY;
92262 +}
92263 +
92264 +#ifdef CONFIG_PAX_USERCOPY
92265 +const char *check_heap_object(const void *ptr, unsigned long n)
92266 +{
92267 + struct page *page;
92268 + struct kmem_cache *s;
92269 + unsigned long offset;
92270 +
92271 + if (ZERO_OR_NULL_PTR(ptr))
92272 + return "<null>";
92273 +
92274 + if (!virt_addr_valid(ptr))
92275 + return NULL;
92276 +
92277 + page = virt_to_head_page(ptr);
92278 +
92279 + if (!PageSlab(page))
92280 + return NULL;
92281 +
92282 + s = page->slab_cache;
92283 + if (!(s->flags & SLAB_USERCOPY))
92284 + return s->name;
92285 +
92286 + offset = (ptr - page_address(page)) % s->size;
92287 + if (offset <= s->object_size && n <= s->object_size - offset)
92288 + return NULL;
92289 +
92290 + return s->name;
92291 +}
92292 +#endif
92293 +
92294 size_t ksize(const void *object)
92295 {
92296 struct page *page;
92297 @@ -3333,6 +3397,7 @@ void kfree(const void *x)
92298 if (unlikely(ZERO_OR_NULL_PTR(x)))
92299 return;
92300
92301 + VM_BUG_ON(!virt_addr_valid(x));
92302 page = virt_to_head_page(x);
92303 if (unlikely(!PageSlab(page))) {
92304 BUG_ON(!PageCompound(page));
92305 @@ -3638,7 +3703,7 @@ static int slab_unmergeable(struct kmem_cache *s)
92306 /*
92307 * We may have set a slab to be unmergeable during bootstrap.
92308 */
92309 - if (s->refcount < 0)
92310 + if (atomic_read(&s->refcount) < 0)
92311 return 1;
92312
92313 return 0;
92314 @@ -3696,7 +3761,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
92315
92316 s = find_mergeable(memcg, size, align, flags, name, ctor);
92317 if (s) {
92318 - s->refcount++;
92319 + atomic_inc(&s->refcount);
92320 /*
92321 * Adjust the object sizes so that we clear
92322 * the complete object on kzalloc.
92323 @@ -3705,7 +3770,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
92324 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
92325
92326 if (sysfs_slab_alias(s, name)) {
92327 - s->refcount--;
92328 + atomic_dec(&s->refcount);
92329 s = NULL;
92330 }
92331 }
92332 @@ -3825,7 +3890,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
92333 }
92334 #endif
92335
92336 -#ifdef CONFIG_SYSFS
92337 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
92338 static int count_inuse(struct page *page)
92339 {
92340 return page->inuse;
92341 @@ -4214,12 +4279,12 @@ static void resiliency_test(void)
92342 validate_slab_cache(kmalloc_caches[9]);
92343 }
92344 #else
92345 -#ifdef CONFIG_SYSFS
92346 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
92347 static void resiliency_test(void) {};
92348 #endif
92349 #endif
92350
92351 -#ifdef CONFIG_SYSFS
92352 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
92353 enum slab_stat_type {
92354 SL_ALL, /* All slabs */
92355 SL_PARTIAL, /* Only partially allocated slabs */
92356 @@ -4459,7 +4524,7 @@ SLAB_ATTR_RO(ctor);
92357
92358 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
92359 {
92360 - return sprintf(buf, "%d\n", s->refcount - 1);
92361 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
92362 }
92363 SLAB_ATTR_RO(aliases);
92364
92365 @@ -4547,6 +4612,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
92366 SLAB_ATTR_RO(cache_dma);
92367 #endif
92368
92369 +#ifdef CONFIG_PAX_USERCOPY_SLABS
92370 +static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
92371 +{
92372 + return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
92373 +}
92374 +SLAB_ATTR_RO(usercopy);
92375 +#endif
92376 +
92377 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
92378 {
92379 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
92380 @@ -4881,6 +4954,9 @@ static struct attribute *slab_attrs[] = {
92381 #ifdef CONFIG_ZONE_DMA
92382 &cache_dma_attr.attr,
92383 #endif
92384 +#ifdef CONFIG_PAX_USERCOPY_SLABS
92385 + &usercopy_attr.attr,
92386 +#endif
92387 #ifdef CONFIG_NUMA
92388 &remote_node_defrag_ratio_attr.attr,
92389 #endif
92390 @@ -5113,6 +5189,7 @@ static char *create_unique_id(struct kmem_cache *s)
92391 return name;
92392 }
92393
92394 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
92395 static int sysfs_slab_add(struct kmem_cache *s)
92396 {
92397 int err;
92398 @@ -5136,7 +5213,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
92399 }
92400
92401 s->kobj.kset = slab_kset;
92402 - err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
92403 + err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
92404 if (err) {
92405 kobject_put(&s->kobj);
92406 return err;
92407 @@ -5170,6 +5247,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
92408 kobject_del(&s->kobj);
92409 kobject_put(&s->kobj);
92410 }
92411 +#endif
92412
92413 /*
92414 * Need to buffer aliases during bootup until sysfs becomes
92415 @@ -5183,6 +5261,7 @@ struct saved_alias {
92416
92417 static struct saved_alias *alias_list;
92418
92419 +#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
92420 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
92421 {
92422 struct saved_alias *al;
92423 @@ -5205,6 +5284,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
92424 alias_list = al;
92425 return 0;
92426 }
92427 +#endif
92428
92429 static int __init slab_sysfs_init(void)
92430 {
92431 diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
92432 index 27eeab3..7c3f7f2 100644
92433 --- a/mm/sparse-vmemmap.c
92434 +++ b/mm/sparse-vmemmap.c
92435 @@ -130,7 +130,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
92436 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
92437 if (!p)
92438 return NULL;
92439 - pud_populate(&init_mm, pud, p);
92440 + pud_populate_kernel(&init_mm, pud, p);
92441 }
92442 return pud;
92443 }
92444 @@ -142,7 +142,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
92445 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
92446 if (!p)
92447 return NULL;
92448 - pgd_populate(&init_mm, pgd, p);
92449 + pgd_populate_kernel(&init_mm, pgd, p);
92450 }
92451 return pgd;
92452 }
92453 diff --git a/mm/sparse.c b/mm/sparse.c
92454 index 4ac1d7e..bbfcb1f 100644
92455 --- a/mm/sparse.c
92456 +++ b/mm/sparse.c
92457 @@ -745,7 +745,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
92458
92459 for (i = 0; i < PAGES_PER_SECTION; i++) {
92460 if (PageHWPoison(&memmap[i])) {
92461 - atomic_long_sub(1, &num_poisoned_pages);
92462 + atomic_long_sub_unchecked(1, &num_poisoned_pages);
92463 ClearPageHWPoison(&memmap[i]);
92464 }
92465 }
92466 diff --git a/mm/swap.c b/mm/swap.c
92467 index 759c3ca..7c1a5b4 100644
92468 --- a/mm/swap.c
92469 +++ b/mm/swap.c
92470 @@ -77,6 +77,8 @@ static void __put_compound_page(struct page *page)
92471
92472 __page_cache_release(page);
92473 dtor = get_compound_page_dtor(page);
92474 + if (!PageHuge(page))
92475 + BUG_ON(dtor != free_compound_page);
92476 (*dtor)(page);
92477 }
92478
92479 diff --git a/mm/swapfile.c b/mm/swapfile.c
92480 index de7c904..c84bf11 100644
92481 --- a/mm/swapfile.c
92482 +++ b/mm/swapfile.c
92483 @@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex);
92484
92485 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
92486 /* Activity counter to indicate that a swapon or swapoff has occurred */
92487 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
92488 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
92489
92490 static inline unsigned char swap_count(unsigned char ent)
92491 {
92492 @@ -1949,7 +1949,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
92493 }
92494 filp_close(swap_file, NULL);
92495 err = 0;
92496 - atomic_inc(&proc_poll_event);
92497 + atomic_inc_unchecked(&proc_poll_event);
92498 wake_up_interruptible(&proc_poll_wait);
92499
92500 out_dput:
92501 @@ -1966,8 +1966,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
92502
92503 poll_wait(file, &proc_poll_wait, wait);
92504
92505 - if (seq->poll_event != atomic_read(&proc_poll_event)) {
92506 - seq->poll_event = atomic_read(&proc_poll_event);
92507 + if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
92508 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
92509 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
92510 }
92511
92512 @@ -2065,7 +2065,7 @@ static int swaps_open(struct inode *inode, struct file *file)
92513 return ret;
92514
92515 seq = file->private_data;
92516 - seq->poll_event = atomic_read(&proc_poll_event);
92517 + seq->poll_event = atomic_read_unchecked(&proc_poll_event);
92518 return 0;
92519 }
92520
92521 @@ -2524,7 +2524,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
92522 (frontswap_map) ? "FS" : "");
92523
92524 mutex_unlock(&swapon_mutex);
92525 - atomic_inc(&proc_poll_event);
92526 + atomic_inc_unchecked(&proc_poll_event);
92527 wake_up_interruptible(&proc_poll_wait);
92528
92529 if (S_ISREG(inode->i_mode))
92530 diff --git a/mm/util.c b/mm/util.c
92531 index eaf63fc2..32b2629 100644
92532 --- a/mm/util.c
92533 +++ b/mm/util.c
92534 @@ -294,6 +294,12 @@ done:
92535 void arch_pick_mmap_layout(struct mm_struct *mm)
92536 {
92537 mm->mmap_base = TASK_UNMAPPED_BASE;
92538 +
92539 +#ifdef CONFIG_PAX_RANDMMAP
92540 + if (mm->pax_flags & MF_PAX_RANDMMAP)
92541 + mm->mmap_base += mm->delta_mmap;
92542 +#endif
92543 +
92544 mm->get_unmapped_area = arch_get_unmapped_area;
92545 }
92546 #endif
92547 diff --git a/mm/vmalloc.c b/mm/vmalloc.c
92548 index 1074543..136dbe0 100644
92549 --- a/mm/vmalloc.c
92550 +++ b/mm/vmalloc.c
92551 @@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
92552
92553 pte = pte_offset_kernel(pmd, addr);
92554 do {
92555 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
92556 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
92557 +
92558 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
92559 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
92560 + BUG_ON(!pte_exec(*pte));
92561 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
92562 + continue;
92563 + }
92564 +#endif
92565 +
92566 + {
92567 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
92568 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
92569 + }
92570 } while (pte++, addr += PAGE_SIZE, addr != end);
92571 }
92572
92573 @@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
92574 pte = pte_alloc_kernel(pmd, addr);
92575 if (!pte)
92576 return -ENOMEM;
92577 +
92578 + pax_open_kernel();
92579 do {
92580 struct page *page = pages[*nr];
92581
92582 - if (WARN_ON(!pte_none(*pte)))
92583 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
92584 + if (pgprot_val(prot) & _PAGE_NX)
92585 +#endif
92586 +
92587 + if (!pte_none(*pte)) {
92588 + pax_close_kernel();
92589 + WARN_ON(1);
92590 return -EBUSY;
92591 - if (WARN_ON(!page))
92592 + }
92593 + if (!page) {
92594 + pax_close_kernel();
92595 + WARN_ON(1);
92596 return -ENOMEM;
92597 + }
92598 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
92599 (*nr)++;
92600 } while (pte++, addr += PAGE_SIZE, addr != end);
92601 + pax_close_kernel();
92602 return 0;
92603 }
92604
92605 @@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
92606 pmd_t *pmd;
92607 unsigned long next;
92608
92609 - pmd = pmd_alloc(&init_mm, pud, addr);
92610 + pmd = pmd_alloc_kernel(&init_mm, pud, addr);
92611 if (!pmd)
92612 return -ENOMEM;
92613 do {
92614 @@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
92615 pud_t *pud;
92616 unsigned long next;
92617
92618 - pud = pud_alloc(&init_mm, pgd, addr);
92619 + pud = pud_alloc_kernel(&init_mm, pgd, addr);
92620 if (!pud)
92621 return -ENOMEM;
92622 do {
92623 @@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
92624 if (addr >= MODULES_VADDR && addr < MODULES_END)
92625 return 1;
92626 #endif
92627 +
92628 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
92629 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
92630 + return 1;
92631 +#endif
92632 +
92633 return is_vmalloc_addr(x);
92634 }
92635
92636 @@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
92637
92638 if (!pgd_none(*pgd)) {
92639 pud_t *pud = pud_offset(pgd, addr);
92640 +#ifdef CONFIG_X86
92641 + if (!pud_large(*pud))
92642 +#endif
92643 if (!pud_none(*pud)) {
92644 pmd_t *pmd = pmd_offset(pud, addr);
92645 +#ifdef CONFIG_X86
92646 + if (!pmd_large(*pmd))
92647 +#endif
92648 if (!pmd_none(*pmd)) {
92649 pte_t *ptep, pte;
92650
92651 @@ -1303,6 +1339,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
92652 struct vm_struct *area;
92653
92654 BUG_ON(in_interrupt());
92655 +
92656 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
92657 + if (flags & VM_KERNEXEC) {
92658 + if (start != VMALLOC_START || end != VMALLOC_END)
92659 + return NULL;
92660 + start = (unsigned long)MODULES_EXEC_VADDR;
92661 + end = (unsigned long)MODULES_EXEC_END;
92662 + }
92663 +#endif
92664 +
92665 if (flags & VM_IOREMAP)
92666 align = 1ul << clamp(fls(size), PAGE_SHIFT, IOREMAP_MAX_ORDER);
92667
92668 @@ -1528,6 +1574,11 @@ void *vmap(struct page **pages, unsigned int count,
92669 if (count > totalram_pages)
92670 return NULL;
92671
92672 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
92673 + if (!(pgprot_val(prot) & _PAGE_NX))
92674 + flags |= VM_KERNEXEC;
92675 +#endif
92676 +
92677 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
92678 __builtin_return_address(0));
92679 if (!area)
92680 @@ -1629,6 +1680,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
92681 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
92682 goto fail;
92683
92684 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
92685 + if (!(pgprot_val(prot) & _PAGE_NX))
92686 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED | VM_KERNEXEC,
92687 + VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
92688 + else
92689 +#endif
92690 +
92691 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNINITIALIZED,
92692 start, end, node, gfp_mask, caller);
92693 if (!area)
92694 @@ -1805,10 +1863,9 @@ EXPORT_SYMBOL(vzalloc_node);
92695 * For tight control over page level allocator and protection flags
92696 * use __vmalloc() instead.
92697 */
92698 -
92699 void *vmalloc_exec(unsigned long size)
92700 {
92701 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
92702 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
92703 NUMA_NO_NODE, __builtin_return_address(0));
92704 }
92705
92706 @@ -2115,6 +2172,8 @@ int remap_vmalloc_range_partial(struct vm_area_struct *vma, unsigned long uaddr,
92707 {
92708 struct vm_struct *area;
92709
92710 + BUG_ON(vma->vm_mirror);
92711 +
92712 size = PAGE_ALIGN(size);
92713
92714 if (!PAGE_ALIGNED(uaddr) || !PAGE_ALIGNED(kaddr))
92715 @@ -2600,7 +2659,11 @@ static int s_show(struct seq_file *m, void *p)
92716 v->addr, v->addr + v->size, v->size);
92717
92718 if (v->caller)
92719 +#ifdef CONFIG_GRKERNSEC_HIDESYM
92720 + seq_printf(m, " %pK", v->caller);
92721 +#else
92722 seq_printf(m, " %pS", v->caller);
92723 +#endif
92724
92725 if (v->nr_pages)
92726 seq_printf(m, " pages=%d", v->nr_pages);
92727 diff --git a/mm/vmstat.c b/mm/vmstat.c
92728 index 5a442a7..5eb281e 100644
92729 --- a/mm/vmstat.c
92730 +++ b/mm/vmstat.c
92731 @@ -79,7 +79,7 @@ void vm_events_fold_cpu(int cpu)
92732 *
92733 * vm_stat contains the global counters
92734 */
92735 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
92736 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
92737 EXPORT_SYMBOL(vm_stat);
92738
92739 #ifdef CONFIG_SMP
92740 @@ -423,7 +423,7 @@ static inline void fold_diff(int *diff)
92741
92742 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
92743 if (diff[i])
92744 - atomic_long_add(diff[i], &vm_stat[i]);
92745 + atomic_long_add_unchecked(diff[i], &vm_stat[i]);
92746 }
92747
92748 /*
92749 @@ -455,7 +455,7 @@ static void refresh_cpu_vm_stats(void)
92750 v = this_cpu_xchg(p->vm_stat_diff[i], 0);
92751 if (v) {
92752
92753 - atomic_long_add(v, &zone->vm_stat[i]);
92754 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
92755 global_diff[i] += v;
92756 #ifdef CONFIG_NUMA
92757 /* 3 seconds idle till flush */
92758 @@ -517,7 +517,7 @@ void cpu_vm_stats_fold(int cpu)
92759
92760 v = p->vm_stat_diff[i];
92761 p->vm_stat_diff[i] = 0;
92762 - atomic_long_add(v, &zone->vm_stat[i]);
92763 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
92764 global_diff[i] += v;
92765 }
92766 }
92767 @@ -537,8 +537,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
92768 if (pset->vm_stat_diff[i]) {
92769 int v = pset->vm_stat_diff[i];
92770 pset->vm_stat_diff[i] = 0;
92771 - atomic_long_add(v, &zone->vm_stat[i]);
92772 - atomic_long_add(v, &vm_stat[i]);
92773 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
92774 + atomic_long_add_unchecked(v, &vm_stat[i]);
92775 }
92776 }
92777 #endif
92778 @@ -1281,10 +1281,20 @@ static int __init setup_vmstat(void)
92779 start_cpu_timer(cpu);
92780 #endif
92781 #ifdef CONFIG_PROC_FS
92782 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
92783 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
92784 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
92785 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
92786 + {
92787 + mode_t gr_mode = S_IRUGO;
92788 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
92789 + gr_mode = S_IRUSR;
92790 +#endif
92791 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
92792 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
92793 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
92794 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
92795 +#else
92796 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
92797 +#endif
92798 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
92799 + }
92800 #endif
92801 return 0;
92802 }
92803 diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
92804 index 61fc573..b5e47d0 100644
92805 --- a/net/8021q/vlan.c
92806 +++ b/net/8021q/vlan.c
92807 @@ -472,7 +472,7 @@ out:
92808 return NOTIFY_DONE;
92809 }
92810
92811 -static struct notifier_block vlan_notifier_block __read_mostly = {
92812 +static struct notifier_block vlan_notifier_block = {
92813 .notifier_call = vlan_device_event,
92814 };
92815
92816 @@ -547,8 +547,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
92817 err = -EPERM;
92818 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
92819 break;
92820 - if ((args.u.name_type >= 0) &&
92821 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
92822 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
92823 struct vlan_net *vn;
92824
92825 vn = net_generic(net, vlan_net_id);
92826 diff --git a/net/9p/mod.c b/net/9p/mod.c
92827 index 6ab36ae..6f1841b 100644
92828 --- a/net/9p/mod.c
92829 +++ b/net/9p/mod.c
92830 @@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
92831 void v9fs_register_trans(struct p9_trans_module *m)
92832 {
92833 spin_lock(&v9fs_trans_lock);
92834 - list_add_tail(&m->list, &v9fs_trans_list);
92835 + pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
92836 spin_unlock(&v9fs_trans_lock);
92837 }
92838 EXPORT_SYMBOL(v9fs_register_trans);
92839 @@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
92840 void v9fs_unregister_trans(struct p9_trans_module *m)
92841 {
92842 spin_lock(&v9fs_trans_lock);
92843 - list_del_init(&m->list);
92844 + pax_list_del_init((struct list_head *)&m->list);
92845 spin_unlock(&v9fs_trans_lock);
92846 }
92847 EXPORT_SYMBOL(v9fs_unregister_trans);
92848 diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
92849 index 3ffda1b..fceac96 100644
92850 --- a/net/9p/trans_fd.c
92851 +++ b/net/9p/trans_fd.c
92852 @@ -432,7 +432,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
92853 oldfs = get_fs();
92854 set_fs(get_ds());
92855 /* The cast to a user pointer is valid due to the set_fs() */
92856 - ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
92857 + ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
92858 set_fs(oldfs);
92859
92860 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
92861 diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
92862 index 876fbe8..8bbea9f 100644
92863 --- a/net/atm/atm_misc.c
92864 +++ b/net/atm/atm_misc.c
92865 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
92866 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
92867 return 1;
92868 atm_return(vcc, truesize);
92869 - atomic_inc(&vcc->stats->rx_drop);
92870 + atomic_inc_unchecked(&vcc->stats->rx_drop);
92871 return 0;
92872 }
92873 EXPORT_SYMBOL(atm_charge);
92874 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
92875 }
92876 }
92877 atm_return(vcc, guess);
92878 - atomic_inc(&vcc->stats->rx_drop);
92879 + atomic_inc_unchecked(&vcc->stats->rx_drop);
92880 return NULL;
92881 }
92882 EXPORT_SYMBOL(atm_alloc_charge);
92883 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
92884
92885 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
92886 {
92887 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
92888 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
92889 __SONET_ITEMS
92890 #undef __HANDLE_ITEM
92891 }
92892 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
92893
92894 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
92895 {
92896 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
92897 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
92898 __SONET_ITEMS
92899 #undef __HANDLE_ITEM
92900 }
92901 diff --git a/net/atm/lec.h b/net/atm/lec.h
92902 index 4149db1..f2ab682 100644
92903 --- a/net/atm/lec.h
92904 +++ b/net/atm/lec.h
92905 @@ -48,7 +48,7 @@ struct lane2_ops {
92906 const u8 *tlvs, u32 sizeoftlvs);
92907 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
92908 const u8 *tlvs, u32 sizeoftlvs);
92909 -};
92910 +} __no_const;
92911
92912 /*
92913 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
92914 diff --git a/net/atm/proc.c b/net/atm/proc.c
92915 index bbb6461..cf04016 100644
92916 --- a/net/atm/proc.c
92917 +++ b/net/atm/proc.c
92918 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
92919 const struct k_atm_aal_stats *stats)
92920 {
92921 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
92922 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
92923 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
92924 - atomic_read(&stats->rx_drop));
92925 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
92926 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
92927 + atomic_read_unchecked(&stats->rx_drop));
92928 }
92929
92930 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
92931 diff --git a/net/atm/resources.c b/net/atm/resources.c
92932 index 0447d5d..3cf4728 100644
92933 --- a/net/atm/resources.c
92934 +++ b/net/atm/resources.c
92935 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
92936 static void copy_aal_stats(struct k_atm_aal_stats *from,
92937 struct atm_aal_stats *to)
92938 {
92939 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
92940 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
92941 __AAL_STAT_ITEMS
92942 #undef __HANDLE_ITEM
92943 }
92944 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
92945 static void subtract_aal_stats(struct k_atm_aal_stats *from,
92946 struct atm_aal_stats *to)
92947 {
92948 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
92949 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
92950 __AAL_STAT_ITEMS
92951 #undef __HANDLE_ITEM
92952 }
92953 diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
92954 index 919a5ce..cc6b444 100644
92955 --- a/net/ax25/sysctl_net_ax25.c
92956 +++ b/net/ax25/sysctl_net_ax25.c
92957 @@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
92958 {
92959 char path[sizeof("net/ax25/") + IFNAMSIZ];
92960 int k;
92961 - struct ctl_table *table;
92962 + ctl_table_no_const *table;
92963
92964 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
92965 if (!table)
92966 diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
92967 index 0a8a80c..f7e89aa 100644
92968 --- a/net/batman-adv/bat_iv_ogm.c
92969 +++ b/net/batman-adv/bat_iv_ogm.c
92970 @@ -121,7 +121,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
92971
92972 /* randomize initial seqno to avoid collision */
92973 get_random_bytes(&random_seqno, sizeof(random_seqno));
92974 - atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
92975 + atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
92976
92977 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
92978 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
92979 @@ -703,9 +703,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
92980 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
92981
92982 /* change sequence number to network order */
92983 - seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
92984 + seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
92985 batadv_ogm_packet->seqno = htonl(seqno);
92986 - atomic_inc(&hard_iface->bat_iv.ogm_seqno);
92987 + atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
92988
92989 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
92990 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
92991 @@ -1111,7 +1111,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
92992 return;
92993
92994 /* could be changed by schedule_own_packet() */
92995 - if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
92996 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
92997
92998 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
92999 has_directlink_flag = 1;
93000 diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
93001 index c478e6b..469fd2f 100644
93002 --- a/net/batman-adv/hard-interface.c
93003 +++ b/net/batman-adv/hard-interface.c
93004 @@ -453,7 +453,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
93005 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
93006 dev_add_pack(&hard_iface->batman_adv_ptype);
93007
93008 - atomic_set(&hard_iface->frag_seqno, 1);
93009 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
93010 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
93011 hard_iface->net_dev->name);
93012
93013 diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
93014 index 813db4e..847edac 100644
93015 --- a/net/batman-adv/soft-interface.c
93016 +++ b/net/batman-adv/soft-interface.c
93017 @@ -263,7 +263,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
93018 primary_if->net_dev->dev_addr, ETH_ALEN);
93019
93020 /* set broadcast sequence number */
93021 - seqno = atomic_inc_return(&bat_priv->bcast_seqno);
93022 + seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
93023 bcast_packet->seqno = htonl(seqno);
93024
93025 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
93026 @@ -483,7 +483,7 @@ static int batadv_softif_init_late(struct net_device *dev)
93027 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
93028
93029 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
93030 - atomic_set(&bat_priv->bcast_seqno, 1);
93031 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
93032 atomic_set(&bat_priv->tt.vn, 0);
93033 atomic_set(&bat_priv->tt.local_changes, 0);
93034 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
93035 diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
93036 index b2c94e1..3d47e07 100644
93037 --- a/net/batman-adv/types.h
93038 +++ b/net/batman-adv/types.h
93039 @@ -51,7 +51,7 @@
93040 struct batadv_hard_iface_bat_iv {
93041 unsigned char *ogm_buff;
93042 int ogm_buff_len;
93043 - atomic_t ogm_seqno;
93044 + atomic_unchecked_t ogm_seqno;
93045 };
93046
93047 /**
93048 @@ -76,7 +76,7 @@ struct batadv_hard_iface {
93049 int16_t if_num;
93050 char if_status;
93051 struct net_device *net_dev;
93052 - atomic_t frag_seqno;
93053 + atomic_unchecked_t frag_seqno;
93054 uint8_t num_bcasts;
93055 struct kobject *hardif_obj;
93056 atomic_t refcount;
93057 @@ -560,7 +560,7 @@ struct batadv_priv {
93058 #ifdef CONFIG_BATMAN_ADV_DEBUG
93059 atomic_t log_level;
93060 #endif
93061 - atomic_t bcast_seqno;
93062 + atomic_unchecked_t bcast_seqno;
93063 atomic_t bcast_queue_left;
93064 atomic_t batman_queue_left;
93065 char num_ifaces;
93066 diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
93067 index 48b31d3..62a0bcb 100644
93068 --- a/net/batman-adv/unicast.c
93069 +++ b/net/batman-adv/unicast.c
93070 @@ -272,7 +272,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
93071 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
93072 frag2->flags = large_tail;
93073
93074 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
93075 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
93076 frag1->seqno = htons(seqno - 1);
93077 frag2->seqno = htons(seqno);
93078
93079 diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
93080 index fa4bf66..e92948f 100644
93081 --- a/net/bluetooth/hci_sock.c
93082 +++ b/net/bluetooth/hci_sock.c
93083 @@ -932,7 +932,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
93084 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
93085 }
93086
93087 - len = min_t(unsigned int, len, sizeof(uf));
93088 + len = min((size_t)len, sizeof(uf));
93089 if (copy_from_user(&uf, optval, len)) {
93090 err = -EFAULT;
93091 break;
93092 diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
93093 index 63fa111..b166ec6 100644
93094 --- a/net/bluetooth/l2cap_core.c
93095 +++ b/net/bluetooth/l2cap_core.c
93096 @@ -3511,8 +3511,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
93097 break;
93098
93099 case L2CAP_CONF_RFC:
93100 - if (olen == sizeof(rfc))
93101 - memcpy(&rfc, (void *)val, olen);
93102 + if (olen != sizeof(rfc))
93103 + break;
93104 +
93105 + memcpy(&rfc, (void *)val, olen);
93106
93107 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
93108 rfc.mode != chan->mode)
93109 diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
93110 index 0098af8..fb5a31f 100644
93111 --- a/net/bluetooth/l2cap_sock.c
93112 +++ b/net/bluetooth/l2cap_sock.c
93113 @@ -485,7 +485,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
93114 struct sock *sk = sock->sk;
93115 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
93116 struct l2cap_options opts;
93117 - int len, err = 0;
93118 + int err = 0;
93119 + size_t len = optlen;
93120 u32 opt;
93121
93122 BT_DBG("sk %p", sk);
93123 @@ -507,7 +508,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
93124 opts.max_tx = chan->max_tx;
93125 opts.txwin_size = chan->tx_win;
93126
93127 - len = min_t(unsigned int, sizeof(opts), optlen);
93128 + len = min(sizeof(opts), len);
93129 if (copy_from_user((char *) &opts, optval, len)) {
93130 err = -EFAULT;
93131 break;
93132 @@ -587,7 +588,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
93133 struct bt_security sec;
93134 struct bt_power pwr;
93135 struct l2cap_conn *conn;
93136 - int len, err = 0;
93137 + int err = 0;
93138 + size_t len = optlen;
93139 u32 opt;
93140
93141 BT_DBG("sk %p", sk);
93142 @@ -610,7 +612,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
93143
93144 sec.level = BT_SECURITY_LOW;
93145
93146 - len = min_t(unsigned int, sizeof(sec), optlen);
93147 + len = min(sizeof(sec), len);
93148 if (copy_from_user((char *) &sec, optval, len)) {
93149 err = -EFAULT;
93150 break;
93151 @@ -707,7 +709,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
93152
93153 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
93154
93155 - len = min_t(unsigned int, sizeof(pwr), optlen);
93156 + len = min(sizeof(pwr), len);
93157 if (copy_from_user((char *) &pwr, optval, len)) {
93158 err = -EFAULT;
93159 break;
93160 diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
93161 index c1c6028..17e8dcc 100644
93162 --- a/net/bluetooth/rfcomm/sock.c
93163 +++ b/net/bluetooth/rfcomm/sock.c
93164 @@ -665,7 +665,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
93165 struct sock *sk = sock->sk;
93166 struct bt_security sec;
93167 int err = 0;
93168 - size_t len;
93169 + size_t len = optlen;
93170 u32 opt;
93171
93172 BT_DBG("sk %p", sk);
93173 @@ -687,7 +687,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
93174
93175 sec.level = BT_SECURITY_LOW;
93176
93177 - len = min_t(unsigned int, sizeof(sec), optlen);
93178 + len = min(sizeof(sec), len);
93179 if (copy_from_user((char *) &sec, optval, len)) {
93180 err = -EFAULT;
93181 break;
93182 diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
93183 index 84fcf9f..e389b27 100644
93184 --- a/net/bluetooth/rfcomm/tty.c
93185 +++ b/net/bluetooth/rfcomm/tty.c
93186 @@ -684,7 +684,7 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
93187 BT_DBG("tty %p id %d", tty, tty->index);
93188
93189 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
93190 - dev->channel, dev->port.count);
93191 + dev->channel, atomic_read(&dev->port.count));
93192
93193 err = tty_port_open(&dev->port, tty, filp);
93194 if (err)
93195 @@ -707,7 +707,7 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
93196 struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data;
93197
93198 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
93199 - dev->port.count);
93200 + atomic_read(&dev->port.count));
93201
93202 tty_port_close(&dev->port, tty, filp);
93203 }
93204 diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
93205 index 5180938..7c470c3 100644
93206 --- a/net/bridge/netfilter/ebt_ulog.c
93207 +++ b/net/bridge/netfilter/ebt_ulog.c
93208 @@ -181,6 +181,7 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
93209 ub->qlen++;
93210
93211 pm = nlmsg_data(nlh);
93212 + memset(pm, 0, sizeof(*pm));
93213
93214 /* Fill in the ulog data */
93215 pm->version = EBT_ULOG_VERSION;
93216 @@ -193,8 +194,6 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
93217 pm->hook = hooknr;
93218 if (uloginfo->prefix != NULL)
93219 strcpy(pm->prefix, uloginfo->prefix);
93220 - else
93221 - *(pm->prefix) = '\0';
93222
93223 if (in) {
93224 strcpy(pm->physindev, in->name);
93225 @@ -204,16 +203,14 @@ static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
93226 strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
93227 else
93228 strcpy(pm->indev, in->name);
93229 - } else
93230 - pm->indev[0] = pm->physindev[0] = '\0';
93231 + }
93232
93233 if (out) {
93234 /* If out exists, then out is a bridge port */
93235 strcpy(pm->physoutdev, out->name);
93236 /* rcu_read_lock()ed by nf_hook_slow */
93237 strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
93238 - } else
93239 - pm->outdev[0] = pm->physoutdev[0] = '\0';
93240 + }
93241
93242 if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0)
93243 BUG();
93244 diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
93245 index ac78024..161a80c 100644
93246 --- a/net/bridge/netfilter/ebtables.c
93247 +++ b/net/bridge/netfilter/ebtables.c
93248 @@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
93249 tmp.valid_hooks = t->table->valid_hooks;
93250 }
93251 mutex_unlock(&ebt_mutex);
93252 - if (copy_to_user(user, &tmp, *len) != 0){
93253 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
93254 BUGPRINT("c2u Didn't work\n");
93255 ret = -EFAULT;
93256 break;
93257 @@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
93258 goto out;
93259 tmp.valid_hooks = t->valid_hooks;
93260
93261 - if (copy_to_user(user, &tmp, *len) != 0) {
93262 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
93263 ret = -EFAULT;
93264 break;
93265 }
93266 @@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
93267 tmp.entries_size = t->table->entries_size;
93268 tmp.valid_hooks = t->table->valid_hooks;
93269
93270 - if (copy_to_user(user, &tmp, *len) != 0) {
93271 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
93272 ret = -EFAULT;
93273 break;
93274 }
93275 diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
93276 index 0f45522..dab651f 100644
93277 --- a/net/caif/cfctrl.c
93278 +++ b/net/caif/cfctrl.c
93279 @@ -10,6 +10,7 @@
93280 #include <linux/spinlock.h>
93281 #include <linux/slab.h>
93282 #include <linux/pkt_sched.h>
93283 +#include <linux/sched.h>
93284 #include <net/caif/caif_layer.h>
93285 #include <net/caif/cfpkt.h>
93286 #include <net/caif/cfctrl.h>
93287 @@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
93288 memset(&dev_info, 0, sizeof(dev_info));
93289 dev_info.id = 0xff;
93290 cfsrvl_init(&this->serv, 0, &dev_info, false);
93291 - atomic_set(&this->req_seq_no, 1);
93292 - atomic_set(&this->rsp_seq_no, 1);
93293 + atomic_set_unchecked(&this->req_seq_no, 1);
93294 + atomic_set_unchecked(&this->rsp_seq_no, 1);
93295 this->serv.layer.receive = cfctrl_recv;
93296 sprintf(this->serv.layer.name, "ctrl");
93297 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
93298 @@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
93299 struct cfctrl_request_info *req)
93300 {
93301 spin_lock_bh(&ctrl->info_list_lock);
93302 - atomic_inc(&ctrl->req_seq_no);
93303 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
93304 + atomic_inc_unchecked(&ctrl->req_seq_no);
93305 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
93306 list_add_tail(&req->list, &ctrl->list);
93307 spin_unlock_bh(&ctrl->info_list_lock);
93308 }
93309 @@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
93310 if (p != first)
93311 pr_warn("Requests are not received in order\n");
93312
93313 - atomic_set(&ctrl->rsp_seq_no,
93314 + atomic_set_unchecked(&ctrl->rsp_seq_no,
93315 p->sequence_no);
93316 list_del(&p->list);
93317 goto out;
93318 diff --git a/net/can/af_can.c b/net/can/af_can.c
93319 index 3ab8dd2..b9aef13 100644
93320 --- a/net/can/af_can.c
93321 +++ b/net/can/af_can.c
93322 @@ -862,7 +862,7 @@ static const struct net_proto_family can_family_ops = {
93323 };
93324
93325 /* notifier block for netdevice event */
93326 -static struct notifier_block can_netdev_notifier __read_mostly = {
93327 +static struct notifier_block can_netdev_notifier = {
93328 .notifier_call = can_notifier,
93329 };
93330
93331 diff --git a/net/can/gw.c b/net/can/gw.c
93332 index 3f9b0f3..fc6d4fa 100644
93333 --- a/net/can/gw.c
93334 +++ b/net/can/gw.c
93335 @@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
93336 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
93337
93338 static HLIST_HEAD(cgw_list);
93339 -static struct notifier_block notifier;
93340
93341 static struct kmem_cache *cgw_cache __read_mostly;
93342
93343 @@ -954,6 +953,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
93344 return err;
93345 }
93346
93347 +static struct notifier_block notifier = {
93348 + .notifier_call = cgw_notifier
93349 +};
93350 +
93351 static __init int cgw_module_init(void)
93352 {
93353 /* sanitize given module parameter */
93354 @@ -969,7 +972,6 @@ static __init int cgw_module_init(void)
93355 return -ENOMEM;
93356
93357 /* set notifier */
93358 - notifier.notifier_call = cgw_notifier;
93359 register_netdevice_notifier(&notifier);
93360
93361 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
93362 diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
93363 index 4a5df7b..9ad1f1d 100644
93364 --- a/net/ceph/messenger.c
93365 +++ b/net/ceph/messenger.c
93366 @@ -186,7 +186,7 @@ static void con_fault(struct ceph_connection *con);
93367 #define MAX_ADDR_STR_LEN 64 /* 54 is enough */
93368
93369 static char addr_str[ADDR_STR_COUNT][MAX_ADDR_STR_LEN];
93370 -static atomic_t addr_str_seq = ATOMIC_INIT(0);
93371 +static atomic_unchecked_t addr_str_seq = ATOMIC_INIT(0);
93372
93373 static struct page *zero_page; /* used in certain error cases */
93374
93375 @@ -197,7 +197,7 @@ const char *ceph_pr_addr(const struct sockaddr_storage *ss)
93376 struct sockaddr_in *in4 = (struct sockaddr_in *) ss;
93377 struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) ss;
93378
93379 - i = atomic_inc_return(&addr_str_seq) & ADDR_STR_COUNT_MASK;
93380 + i = atomic_inc_return_unchecked(&addr_str_seq) & ADDR_STR_COUNT_MASK;
93381 s = addr_str[i];
93382
93383 switch (ss->ss_family) {
93384 diff --git a/net/compat.c b/net/compat.c
93385 index dd32e34..6066f87 100644
93386 --- a/net/compat.c
93387 +++ b/net/compat.c
93388 @@ -73,9 +73,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
93389 return -EFAULT;
93390 if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
93391 kmsg->msg_namelen = sizeof(struct sockaddr_storage);
93392 - kmsg->msg_name = compat_ptr(tmp1);
93393 - kmsg->msg_iov = compat_ptr(tmp2);
93394 - kmsg->msg_control = compat_ptr(tmp3);
93395 + kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
93396 + kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
93397 + kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
93398 return 0;
93399 }
93400
93401 @@ -87,7 +87,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
93402
93403 if (kern_msg->msg_namelen) {
93404 if (mode == VERIFY_READ) {
93405 - int err = move_addr_to_kernel(kern_msg->msg_name,
93406 + int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
93407 kern_msg->msg_namelen,
93408 kern_address);
93409 if (err < 0)
93410 @@ -99,7 +99,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
93411 kern_msg->msg_name = NULL;
93412
93413 tot_len = iov_from_user_compat_to_kern(kern_iov,
93414 - (struct compat_iovec __user *)kern_msg->msg_iov,
93415 + (struct compat_iovec __force_user *)kern_msg->msg_iov,
93416 kern_msg->msg_iovlen);
93417 if (tot_len >= 0)
93418 kern_msg->msg_iov = kern_iov;
93419 @@ -119,20 +119,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
93420
93421 #define CMSG_COMPAT_FIRSTHDR(msg) \
93422 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
93423 - (struct compat_cmsghdr __user *)((msg)->msg_control) : \
93424 + (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
93425 (struct compat_cmsghdr __user *)NULL)
93426
93427 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
93428 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
93429 (ucmlen) <= (unsigned long) \
93430 ((mhdr)->msg_controllen - \
93431 - ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
93432 + ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
93433
93434 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
93435 struct compat_cmsghdr __user *cmsg, int cmsg_len)
93436 {
93437 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
93438 - if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
93439 + if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
93440 msg->msg_controllen)
93441 return NULL;
93442 return (struct compat_cmsghdr __user *)ptr;
93443 @@ -222,7 +222,7 @@ Efault:
93444
93445 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
93446 {
93447 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
93448 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
93449 struct compat_cmsghdr cmhdr;
93450 struct compat_timeval ctv;
93451 struct compat_timespec cts[3];
93452 @@ -278,7 +278,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
93453
93454 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
93455 {
93456 - struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
93457 + struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
93458 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
93459 int fdnum = scm->fp->count;
93460 struct file **fp = scm->fp->fp;
93461 @@ -366,7 +366,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
93462 return -EFAULT;
93463 old_fs = get_fs();
93464 set_fs(KERNEL_DS);
93465 - err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
93466 + err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
93467 set_fs(old_fs);
93468
93469 return err;
93470 @@ -427,7 +427,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
93471 len = sizeof(ktime);
93472 old_fs = get_fs();
93473 set_fs(KERNEL_DS);
93474 - err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
93475 + err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
93476 set_fs(old_fs);
93477
93478 if (!err) {
93479 @@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
93480 case MCAST_JOIN_GROUP:
93481 case MCAST_LEAVE_GROUP:
93482 {
93483 - struct compat_group_req __user *gr32 = (void *)optval;
93484 + struct compat_group_req __user *gr32 = (void __user *)optval;
93485 struct group_req __user *kgr =
93486 compat_alloc_user_space(sizeof(struct group_req));
93487 u32 interface;
93488 @@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
93489 case MCAST_BLOCK_SOURCE:
93490 case MCAST_UNBLOCK_SOURCE:
93491 {
93492 - struct compat_group_source_req __user *gsr32 = (void *)optval;
93493 + struct compat_group_source_req __user *gsr32 = (void __user *)optval;
93494 struct group_source_req __user *kgsr = compat_alloc_user_space(
93495 sizeof(struct group_source_req));
93496 u32 interface;
93497 @@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
93498 }
93499 case MCAST_MSFILTER:
93500 {
93501 - struct compat_group_filter __user *gf32 = (void *)optval;
93502 + struct compat_group_filter __user *gf32 = (void __user *)optval;
93503 struct group_filter __user *kgf;
93504 u32 interface, fmode, numsrc;
93505
93506 @@ -650,7 +650,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
93507 char __user *optval, int __user *optlen,
93508 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
93509 {
93510 - struct compat_group_filter __user *gf32 = (void *)optval;
93511 + struct compat_group_filter __user *gf32 = (void __user *)optval;
93512 struct group_filter __user *kgf;
93513 int __user *koptlen;
93514 u32 interface, fmode, numsrc;
93515 @@ -808,7 +808,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
93516
93517 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
93518 return -EINVAL;
93519 - if (copy_from_user(a, args, nas[call]))
93520 + if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
93521 return -EFAULT;
93522 a0 = a[0];
93523 a1 = a[1];
93524 diff --git a/net/core/datagram.c b/net/core/datagram.c
93525 index af814e7..3d761de 100644
93526 --- a/net/core/datagram.c
93527 +++ b/net/core/datagram.c
93528 @@ -301,7 +301,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
93529 }
93530
93531 kfree_skb(skb);
93532 - atomic_inc(&sk->sk_drops);
93533 + atomic_inc_unchecked(&sk->sk_drops);
93534 sk_mem_reclaim_partial(sk);
93535
93536 return err;
93537 diff --git a/net/core/dev.c b/net/core/dev.c
93538 index 3d13874..6e78dc7 100644
93539 --- a/net/core/dev.c
93540 +++ b/net/core/dev.c
93541 @@ -1680,14 +1680,14 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
93542 {
93543 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
93544 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
93545 - atomic_long_inc(&dev->rx_dropped);
93546 + atomic_long_inc_unchecked(&dev->rx_dropped);
93547 kfree_skb(skb);
93548 return NET_RX_DROP;
93549 }
93550 }
93551
93552 if (unlikely(!is_skb_forwardable(dev, skb))) {
93553 - atomic_long_inc(&dev->rx_dropped);
93554 + atomic_long_inc_unchecked(&dev->rx_dropped);
93555 kfree_skb(skb);
93556 return NET_RX_DROP;
93557 }
93558 @@ -2428,7 +2428,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
93559
93560 struct dev_gso_cb {
93561 void (*destructor)(struct sk_buff *skb);
93562 -};
93563 +} __no_const;
93564
93565 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
93566
93567 @@ -3203,7 +3203,7 @@ enqueue:
93568
93569 local_irq_restore(flags);
93570
93571 - atomic_long_inc(&skb->dev->rx_dropped);
93572 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
93573 kfree_skb(skb);
93574 return NET_RX_DROP;
93575 }
93576 @@ -3275,7 +3275,7 @@ int netif_rx_ni(struct sk_buff *skb)
93577 }
93578 EXPORT_SYMBOL(netif_rx_ni);
93579
93580 -static void net_tx_action(struct softirq_action *h)
93581 +static __latent_entropy void net_tx_action(void)
93582 {
93583 struct softnet_data *sd = &__get_cpu_var(softnet_data);
93584
93585 @@ -3609,7 +3609,7 @@ ncls:
93586 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
93587 } else {
93588 drop:
93589 - atomic_long_inc(&skb->dev->rx_dropped);
93590 + atomic_long_inc_unchecked(&skb->dev->rx_dropped);
93591 kfree_skb(skb);
93592 /* Jamal, now you will not able to escape explaining
93593 * me how you were going to use this. :-)
93594 @@ -4269,7 +4269,7 @@ void netif_napi_del(struct napi_struct *napi)
93595 }
93596 EXPORT_SYMBOL(netif_napi_del);
93597
93598 -static void net_rx_action(struct softirq_action *h)
93599 +static __latent_entropy void net_rx_action(void)
93600 {
93601 struct softnet_data *sd = &__get_cpu_var(softnet_data);
93602 unsigned long time_limit = jiffies + 2;
93603 @@ -5973,7 +5973,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
93604 } else {
93605 netdev_stats_to_stats64(storage, &dev->stats);
93606 }
93607 - storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
93608 + storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
93609 return storage;
93610 }
93611 EXPORT_SYMBOL(dev_get_stats);
93612 diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
93613 index 5b7d0e1..cb960fc 100644
93614 --- a/net/core/dev_ioctl.c
93615 +++ b/net/core/dev_ioctl.c
93616 @@ -365,9 +365,13 @@ void dev_load(struct net *net, const char *name)
93617 if (no_module && capable(CAP_NET_ADMIN))
93618 no_module = request_module("netdev-%s", name);
93619 if (no_module && capable(CAP_SYS_MODULE)) {
93620 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
93621 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
93622 +#else
93623 if (!request_module("%s", name))
93624 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
93625 name);
93626 +#endif
93627 }
93628 }
93629 EXPORT_SYMBOL(dev_load);
93630 diff --git a/net/core/flow.c b/net/core/flow.c
93631 index dfa602c..3103d88 100644
93632 --- a/net/core/flow.c
93633 +++ b/net/core/flow.c
93634 @@ -61,7 +61,7 @@ struct flow_cache {
93635 struct timer_list rnd_timer;
93636 };
93637
93638 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
93639 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
93640 EXPORT_SYMBOL(flow_cache_genid);
93641 static struct flow_cache flow_cache_global;
93642 static struct kmem_cache *flow_cachep __read_mostly;
93643 @@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
93644
93645 static int flow_entry_valid(struct flow_cache_entry *fle)
93646 {
93647 - if (atomic_read(&flow_cache_genid) != fle->genid)
93648 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
93649 return 0;
93650 if (fle->object && !fle->object->ops->check(fle->object))
93651 return 0;
93652 @@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
93653 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
93654 fcp->hash_count++;
93655 }
93656 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
93657 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
93658 flo = fle->object;
93659 if (!flo)
93660 goto ret_object;
93661 @@ -279,7 +279,7 @@ nocache:
93662 }
93663 flo = resolver(net, key, family, dir, flo, ctx);
93664 if (fle) {
93665 - fle->genid = atomic_read(&flow_cache_genid);
93666 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
93667 if (!IS_ERR(flo))
93668 fle->object = flo;
93669 else
93670 diff --git a/net/core/iovec.c b/net/core/iovec.c
93671 index 7d84ea1..55385ae 100644
93672 --- a/net/core/iovec.c
93673 +++ b/net/core/iovec.c
93674 @@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
93675 if (m->msg_namelen) {
93676 if (mode == VERIFY_READ) {
93677 void __user *namep;
93678 - namep = (void __user __force *) m->msg_name;
93679 + namep = (void __force_user *) m->msg_name;
93680 err = move_addr_to_kernel(namep, m->msg_namelen,
93681 address);
93682 if (err < 0)
93683 @@ -55,7 +55,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
93684 }
93685
93686 size = m->msg_iovlen * sizeof(struct iovec);
93687 - if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
93688 + if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
93689 return -EFAULT;
93690
93691 m->msg_iov = iov;
93692 diff --git a/net/core/neighbour.c b/net/core/neighbour.c
93693 index 6072610..7374c18 100644
93694 --- a/net/core/neighbour.c
93695 +++ b/net/core/neighbour.c
93696 @@ -2774,7 +2774,7 @@ static int proc_unres_qlen(struct ctl_table *ctl, int write,
93697 void __user *buffer, size_t *lenp, loff_t *ppos)
93698 {
93699 int size, ret;
93700 - struct ctl_table tmp = *ctl;
93701 + ctl_table_no_const tmp = *ctl;
93702
93703 tmp.extra1 = &zero;
93704 tmp.extra2 = &unres_qlen_max;
93705 diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
93706 index 2bf8329..7960607 100644
93707 --- a/net/core/net-procfs.c
93708 +++ b/net/core/net-procfs.c
93709 @@ -283,8 +283,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
93710 else
93711 seq_printf(seq, "%04x", ntohs(pt->type));
93712
93713 +#ifdef CONFIG_GRKERNSEC_HIDESYM
93714 + seq_printf(seq, " %-8s %pf\n",
93715 + pt->dev ? pt->dev->name : "", NULL);
93716 +#else
93717 seq_printf(seq, " %-8s %pf\n",
93718 pt->dev ? pt->dev->name : "", pt->func);
93719 +#endif
93720 }
93721
93722 return 0;
93723 diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
93724 index d954b56..b0a0f7a 100644
93725 --- a/net/core/net-sysfs.c
93726 +++ b/net/core/net-sysfs.c
93727 @@ -1356,7 +1356,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
93728 }
93729 EXPORT_SYMBOL(netdev_class_remove_file);
93730
93731 -int netdev_kobject_init(void)
93732 +int __init netdev_kobject_init(void)
93733 {
93734 kobj_ns_type_register(&net_ns_type_operations);
93735 return class_register(&net_class);
93736 diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
93737 index 81d3a9a..a0bd7a8 100644
93738 --- a/net/core/net_namespace.c
93739 +++ b/net/core/net_namespace.c
93740 @@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
93741 int error;
93742 LIST_HEAD(net_exit_list);
93743
93744 - list_add_tail(&ops->list, list);
93745 + pax_list_add_tail((struct list_head *)&ops->list, list);
93746 if (ops->init || (ops->id && ops->size)) {
93747 for_each_net(net) {
93748 error = ops_init(ops, net);
93749 @@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
93750
93751 out_undo:
93752 /* If I have an error cleanup all namespaces I initialized */
93753 - list_del(&ops->list);
93754 + pax_list_del((struct list_head *)&ops->list);
93755 ops_exit_list(ops, &net_exit_list);
93756 ops_free_list(ops, &net_exit_list);
93757 return error;
93758 @@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
93759 struct net *net;
93760 LIST_HEAD(net_exit_list);
93761
93762 - list_del(&ops->list);
93763 + pax_list_del((struct list_head *)&ops->list);
93764 for_each_net(net)
93765 list_add_tail(&net->exit_list, &net_exit_list);
93766 ops_exit_list(ops, &net_exit_list);
93767 @@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
93768 mutex_lock(&net_mutex);
93769 error = register_pernet_operations(&pernet_list, ops);
93770 if (!error && (first_device == &pernet_list))
93771 - first_device = &ops->list;
93772 + first_device = (struct list_head *)&ops->list;
93773 mutex_unlock(&net_mutex);
93774 return error;
93775 }
93776 diff --git a/net/core/netpoll.c b/net/core/netpoll.c
93777 index fc75c9e..8c8e9be 100644
93778 --- a/net/core/netpoll.c
93779 +++ b/net/core/netpoll.c
93780 @@ -428,7 +428,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
93781 struct udphdr *udph;
93782 struct iphdr *iph;
93783 struct ethhdr *eth;
93784 - static atomic_t ip_ident;
93785 + static atomic_unchecked_t ip_ident;
93786 struct ipv6hdr *ip6h;
93787
93788 udp_len = len + sizeof(*udph);
93789 @@ -499,7 +499,7 @@ void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
93790 put_unaligned(0x45, (unsigned char *)iph);
93791 iph->tos = 0;
93792 put_unaligned(htons(ip_len), &(iph->tot_len));
93793 - iph->id = htons(atomic_inc_return(&ip_ident));
93794 + iph->id = htons(atomic_inc_return_unchecked(&ip_ident));
93795 iph->frag_off = 0;
93796 iph->ttl = 64;
93797 iph->protocol = IPPROTO_UDP;
93798 diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
93799 index 2a0e21d..6ad7642 100644
93800 --- a/net/core/rtnetlink.c
93801 +++ b/net/core/rtnetlink.c
93802 @@ -58,7 +58,7 @@ struct rtnl_link {
93803 rtnl_doit_func doit;
93804 rtnl_dumpit_func dumpit;
93805 rtnl_calcit_func calcit;
93806 -};
93807 +} __no_const;
93808
93809 static DEFINE_MUTEX(rtnl_mutex);
93810
93811 @@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
93812 if (rtnl_link_ops_get(ops->kind))
93813 return -EEXIST;
93814
93815 - if (!ops->dellink)
93816 - ops->dellink = unregister_netdevice_queue;
93817 + if (!ops->dellink) {
93818 + pax_open_kernel();
93819 + *(void **)&ops->dellink = unregister_netdevice_queue;
93820 + pax_close_kernel();
93821 + }
93822
93823 - list_add_tail(&ops->list, &link_ops);
93824 + pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
93825 return 0;
93826 }
93827 EXPORT_SYMBOL_GPL(__rtnl_link_register);
93828 @@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
93829 for_each_net(net) {
93830 __rtnl_kill_links(net, ops);
93831 }
93832 - list_del(&ops->list);
93833 + pax_list_del((struct list_head *)&ops->list);
93834 }
93835 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
93836
93837 diff --git a/net/core/scm.c b/net/core/scm.c
93838 index b442e7e..6f5b5a2 100644
93839 --- a/net/core/scm.c
93840 +++ b/net/core/scm.c
93841 @@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
93842 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
93843 {
93844 struct cmsghdr __user *cm
93845 - = (__force struct cmsghdr __user *)msg->msg_control;
93846 + = (struct cmsghdr __force_user *)msg->msg_control;
93847 struct cmsghdr cmhdr;
93848 int cmlen = CMSG_LEN(len);
93849 int err;
93850 @@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
93851 err = -EFAULT;
93852 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
93853 goto out;
93854 - if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
93855 + if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
93856 goto out;
93857 cmlen = CMSG_SPACE(len);
93858 if (msg->msg_controllen < cmlen)
93859 @@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
93860 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
93861 {
93862 struct cmsghdr __user *cm
93863 - = (__force struct cmsghdr __user*)msg->msg_control;
93864 + = (struct cmsghdr __force_user *)msg->msg_control;
93865
93866 int fdmax = 0;
93867 int fdnum = scm->fp->count;
93868 @@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
93869 if (fdnum < fdmax)
93870 fdmax = fdnum;
93871
93872 - for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
93873 + for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
93874 i++, cmfptr++)
93875 {
93876 struct socket *sock;
93877 diff --git a/net/core/skbuff.c b/net/core/skbuff.c
93878 index c28c7fe..a399a6d 100644
93879 --- a/net/core/skbuff.c
93880 +++ b/net/core/skbuff.c
93881 @@ -3104,13 +3104,15 @@ void __init skb_init(void)
93882 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
93883 sizeof(struct sk_buff),
93884 0,
93885 - SLAB_HWCACHE_ALIGN|SLAB_PANIC,
93886 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|
93887 + SLAB_NO_SANITIZE,
93888 NULL);
93889 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
93890 (2*sizeof(struct sk_buff)) +
93891 sizeof(atomic_t),
93892 0,
93893 - SLAB_HWCACHE_ALIGN|SLAB_PANIC,
93894 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|
93895 + SLAB_NO_SANITIZE,
93896 NULL);
93897 }
93898
93899 @@ -3541,6 +3543,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
93900 skb->tstamp.tv64 = 0;
93901 skb->pkt_type = PACKET_HOST;
93902 skb->skb_iif = 0;
93903 + skb->local_df = 0;
93904 skb_dst_drop(skb);
93905 skb->mark = 0;
93906 secpath_reset(skb);
93907 diff --git a/net/core/sock.c b/net/core/sock.c
93908 index 0b39e7a..5e9f91e 100644
93909 --- a/net/core/sock.c
93910 +++ b/net/core/sock.c
93911 @@ -393,7 +393,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
93912 struct sk_buff_head *list = &sk->sk_receive_queue;
93913
93914 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
93915 - atomic_inc(&sk->sk_drops);
93916 + atomic_inc_unchecked(&sk->sk_drops);
93917 trace_sock_rcvqueue_full(sk, skb);
93918 return -ENOMEM;
93919 }
93920 @@ -403,7 +403,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
93921 return err;
93922
93923 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
93924 - atomic_inc(&sk->sk_drops);
93925 + atomic_inc_unchecked(&sk->sk_drops);
93926 return -ENOBUFS;
93927 }
93928
93929 @@ -423,7 +423,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
93930 skb_dst_force(skb);
93931
93932 spin_lock_irqsave(&list->lock, flags);
93933 - skb->dropcount = atomic_read(&sk->sk_drops);
93934 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
93935 __skb_queue_tail(list, skb);
93936 spin_unlock_irqrestore(&list->lock, flags);
93937
93938 @@ -443,7 +443,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
93939 skb->dev = NULL;
93940
93941 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
93942 - atomic_inc(&sk->sk_drops);
93943 + atomic_inc_unchecked(&sk->sk_drops);
93944 goto discard_and_relse;
93945 }
93946 if (nested)
93947 @@ -461,7 +461,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
93948 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
93949 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
93950 bh_unlock_sock(sk);
93951 - atomic_inc(&sk->sk_drops);
93952 + atomic_inc_unchecked(&sk->sk_drops);
93953 goto discard_and_relse;
93954 }
93955
93956 @@ -949,12 +949,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
93957 struct timeval tm;
93958 } v;
93959
93960 - int lv = sizeof(int);
93961 - int len;
93962 + unsigned int lv = sizeof(int);
93963 + unsigned int len;
93964
93965 if (get_user(len, optlen))
93966 return -EFAULT;
93967 - if (len < 0)
93968 + if (len > INT_MAX)
93969 return -EINVAL;
93970
93971 memset(&v, 0, sizeof(v));
93972 @@ -1106,11 +1106,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
93973
93974 case SO_PEERNAME:
93975 {
93976 - char address[128];
93977 + char address[_K_SS_MAXSIZE];
93978
93979 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
93980 return -ENOTCONN;
93981 - if (lv < len)
93982 + if (lv < len || sizeof address < len)
93983 return -EINVAL;
93984 if (copy_to_user(optval, address, len))
93985 return -EFAULT;
93986 @@ -1183,7 +1183,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
93987
93988 if (len > lv)
93989 len = lv;
93990 - if (copy_to_user(optval, &v, len))
93991 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
93992 return -EFAULT;
93993 lenout:
93994 if (put_user(len, optlen))
93995 @@ -2326,7 +2326,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
93996 */
93997 smp_wmb();
93998 atomic_set(&sk->sk_refcnt, 1);
93999 - atomic_set(&sk->sk_drops, 0);
94000 + atomic_set_unchecked(&sk->sk_drops, 0);
94001 }
94002 EXPORT_SYMBOL(sock_init_data);
94003
94004 @@ -2451,6 +2451,7 @@ void sock_enable_timestamp(struct sock *sk, int flag)
94005 int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
94006 int level, int type)
94007 {
94008 + struct sock_extended_err ee;
94009 struct sock_exterr_skb *serr;
94010 struct sk_buff *skb, *skb2;
94011 int copied, err;
94012 @@ -2472,7 +2473,8 @@ int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len,
94013 sock_recv_timestamp(msg, sk, skb);
94014
94015 serr = SKB_EXT_ERR(skb);
94016 - put_cmsg(msg, level, type, sizeof(serr->ee), &serr->ee);
94017 + ee = serr->ee;
94018 + put_cmsg(msg, level, type, sizeof ee, &ee);
94019
94020 msg->msg_flags |= MSG_ERRQUEUE;
94021 err = copied;
94022 diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
94023 index a0e9cf6..ef7f9ed 100644
94024 --- a/net/core/sock_diag.c
94025 +++ b/net/core/sock_diag.c
94026 @@ -9,26 +9,33 @@
94027 #include <linux/inet_diag.h>
94028 #include <linux/sock_diag.h>
94029
94030 -static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
94031 +static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
94032 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
94033 static DEFINE_MUTEX(sock_diag_table_mutex);
94034
94035 int sock_diag_check_cookie(void *sk, __u32 *cookie)
94036 {
94037 +#ifndef CONFIG_GRKERNSEC_HIDESYM
94038 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
94039 cookie[1] != INET_DIAG_NOCOOKIE) &&
94040 ((u32)(unsigned long)sk != cookie[0] ||
94041 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
94042 return -ESTALE;
94043 else
94044 +#endif
94045 return 0;
94046 }
94047 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
94048
94049 void sock_diag_save_cookie(void *sk, __u32 *cookie)
94050 {
94051 +#ifdef CONFIG_GRKERNSEC_HIDESYM
94052 + cookie[0] = 0;
94053 + cookie[1] = 0;
94054 +#else
94055 cookie[0] = (u32)(unsigned long)sk;
94056 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
94057 +#endif
94058 }
94059 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
94060
94061 @@ -113,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
94062 mutex_lock(&sock_diag_table_mutex);
94063 if (sock_diag_handlers[hndl->family])
94064 err = -EBUSY;
94065 - else
94066 + else {
94067 + pax_open_kernel();
94068 sock_diag_handlers[hndl->family] = hndl;
94069 + pax_close_kernel();
94070 + }
94071 mutex_unlock(&sock_diag_table_mutex);
94072
94073 return err;
94074 @@ -130,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
94075
94076 mutex_lock(&sock_diag_table_mutex);
94077 BUG_ON(sock_diag_handlers[family] != hnld);
94078 + pax_open_kernel();
94079 sock_diag_handlers[family] = NULL;
94080 + pax_close_kernel();
94081 mutex_unlock(&sock_diag_table_mutex);
94082 }
94083 EXPORT_SYMBOL_GPL(sock_diag_unregister);
94084 diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
94085 index cca4441..5e616de 100644
94086 --- a/net/core/sysctl_net_core.c
94087 +++ b/net/core/sysctl_net_core.c
94088 @@ -32,7 +32,7 @@ static int rps_sock_flow_sysctl(struct ctl_table *table, int write,
94089 {
94090 unsigned int orig_size, size;
94091 int ret, i;
94092 - struct ctl_table tmp = {
94093 + ctl_table_no_const tmp = {
94094 .data = &size,
94095 .maxlen = sizeof(size),
94096 .mode = table->mode
94097 @@ -199,7 +199,7 @@ static int set_default_qdisc(struct ctl_table *table, int write,
94098 void __user *buffer, size_t *lenp, loff_t *ppos)
94099 {
94100 char id[IFNAMSIZ];
94101 - struct ctl_table tbl = {
94102 + ctl_table_no_const tbl = {
94103 .data = id,
94104 .maxlen = IFNAMSIZ,
94105 };
94106 @@ -378,13 +378,12 @@ static struct ctl_table netns_core_table[] = {
94107
94108 static __net_init int sysctl_core_net_init(struct net *net)
94109 {
94110 - struct ctl_table *tbl;
94111 + ctl_table_no_const *tbl = NULL;
94112
94113 net->core.sysctl_somaxconn = SOMAXCONN;
94114
94115 - tbl = netns_core_table;
94116 if (!net_eq(net, &init_net)) {
94117 - tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
94118 + tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
94119 if (tbl == NULL)
94120 goto err_dup;
94121
94122 @@ -394,17 +393,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
94123 if (net->user_ns != &init_user_ns) {
94124 tbl[0].procname = NULL;
94125 }
94126 - }
94127 -
94128 - net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
94129 + net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
94130 + } else
94131 + net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
94132 if (net->core.sysctl_hdr == NULL)
94133 goto err_reg;
94134
94135 return 0;
94136
94137 err_reg:
94138 - if (tbl != netns_core_table)
94139 - kfree(tbl);
94140 + kfree(tbl);
94141 err_dup:
94142 return -ENOMEM;
94143 }
94144 @@ -419,7 +417,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
94145 kfree(tbl);
94146 }
94147
94148 -static __net_initdata struct pernet_operations sysctl_core_ops = {
94149 +static __net_initconst struct pernet_operations sysctl_core_ops = {
94150 .init = sysctl_core_net_init,
94151 .exit = sysctl_core_net_exit,
94152 };
94153 diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
94154 index dd4d506..fb2fb87 100644
94155 --- a/net/decnet/af_decnet.c
94156 +++ b/net/decnet/af_decnet.c
94157 @@ -465,6 +465,7 @@ static struct proto dn_proto = {
94158 .sysctl_rmem = sysctl_decnet_rmem,
94159 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
94160 .obj_size = sizeof(struct dn_sock),
94161 + .slab_flags = SLAB_USERCOPY,
94162 };
94163
94164 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
94165 diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
94166 index 5325b54..a0d4d69 100644
94167 --- a/net/decnet/sysctl_net_decnet.c
94168 +++ b/net/decnet/sysctl_net_decnet.c
94169 @@ -174,7 +174,7 @@ static int dn_node_address_handler(struct ctl_table *table, int write,
94170
94171 if (len > *lenp) len = *lenp;
94172
94173 - if (copy_to_user(buffer, addr, len))
94174 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
94175 return -EFAULT;
94176
94177 *lenp = len;
94178 @@ -237,7 +237,7 @@ static int dn_def_dev_handler(struct ctl_table *table, int write,
94179
94180 if (len > *lenp) len = *lenp;
94181
94182 - if (copy_to_user(buffer, devname, len))
94183 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
94184 return -EFAULT;
94185
94186 *lenp = len;
94187 diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
94188 index 008f337..b03b8c9 100644
94189 --- a/net/ieee802154/6lowpan.c
94190 +++ b/net/ieee802154/6lowpan.c
94191 @@ -548,7 +548,7 @@ static int lowpan_header_create(struct sk_buff *skb,
94192 hc06_ptr += 3;
94193 } else {
94194 /* compress nothing */
94195 - memcpy(hc06_ptr, &hdr, 4);
94196 + memcpy(hc06_ptr, hdr, 4);
94197 /* replace the top byte with new ECN | DSCP format */
94198 *hc06_ptr = tmp;
94199 hc06_ptr += 4;
94200 diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
94201 index cfeb85c..385989a 100644
94202 --- a/net/ipv4/af_inet.c
94203 +++ b/net/ipv4/af_inet.c
94204 @@ -1675,13 +1675,9 @@ static int __init inet_init(void)
94205
94206 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
94207
94208 - sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
94209 - if (!sysctl_local_reserved_ports)
94210 - goto out;
94211 -
94212 rc = proto_register(&tcp_prot, 1);
94213 if (rc)
94214 - goto out_free_reserved_ports;
94215 + goto out;
94216
94217 rc = proto_register(&udp_prot, 1);
94218 if (rc)
94219 @@ -1790,8 +1786,6 @@ out_unregister_udp_proto:
94220 proto_unregister(&udp_prot);
94221 out_unregister_tcp_proto:
94222 proto_unregister(&tcp_prot);
94223 -out_free_reserved_ports:
94224 - kfree(sysctl_local_reserved_ports);
94225 goto out;
94226 }
94227
94228 diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
94229 index a1b5bcb..62ec5c6 100644
94230 --- a/net/ipv4/devinet.c
94231 +++ b/net/ipv4/devinet.c
94232 @@ -1533,7 +1533,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
94233 idx = 0;
94234 head = &net->dev_index_head[h];
94235 rcu_read_lock();
94236 - cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
94237 + cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
94238 net->dev_base_seq;
94239 hlist_for_each_entry_rcu(dev, head, index_hlist) {
94240 if (idx < s_idx)
94241 @@ -1844,7 +1844,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
94242 idx = 0;
94243 head = &net->dev_index_head[h];
94244 rcu_read_lock();
94245 - cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
94246 + cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
94247 net->dev_base_seq;
94248 hlist_for_each_entry_rcu(dev, head, index_hlist) {
94249 if (idx < s_idx)
94250 @@ -2069,7 +2069,7 @@ static int ipv4_doint_and_flush(struct ctl_table *ctl, int write,
94251 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
94252 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
94253
94254 -static struct devinet_sysctl_table {
94255 +static const struct devinet_sysctl_table {
94256 struct ctl_table_header *sysctl_header;
94257 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
94258 } devinet_sysctl = {
94259 @@ -2191,7 +2191,7 @@ static __net_init int devinet_init_net(struct net *net)
94260 int err;
94261 struct ipv4_devconf *all, *dflt;
94262 #ifdef CONFIG_SYSCTL
94263 - struct ctl_table *tbl = ctl_forward_entry;
94264 + ctl_table_no_const *tbl = NULL;
94265 struct ctl_table_header *forw_hdr;
94266 #endif
94267
94268 @@ -2209,7 +2209,7 @@ static __net_init int devinet_init_net(struct net *net)
94269 goto err_alloc_dflt;
94270
94271 #ifdef CONFIG_SYSCTL
94272 - tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
94273 + tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
94274 if (tbl == NULL)
94275 goto err_alloc_ctl;
94276
94277 @@ -2229,7 +2229,10 @@ static __net_init int devinet_init_net(struct net *net)
94278 goto err_reg_dflt;
94279
94280 err = -ENOMEM;
94281 - forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
94282 + if (!net_eq(net, &init_net))
94283 + forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
94284 + else
94285 + forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
94286 if (forw_hdr == NULL)
94287 goto err_reg_ctl;
94288 net->ipv4.forw_hdr = forw_hdr;
94289 @@ -2245,8 +2248,7 @@ err_reg_ctl:
94290 err_reg_dflt:
94291 __devinet_sysctl_unregister(all);
94292 err_reg_all:
94293 - if (tbl != ctl_forward_entry)
94294 - kfree(tbl);
94295 + kfree(tbl);
94296 err_alloc_ctl:
94297 #endif
94298 if (dflt != &ipv4_devconf_dflt)
94299 diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
94300 index b3f627a..b0f3e99 100644
94301 --- a/net/ipv4/fib_frontend.c
94302 +++ b/net/ipv4/fib_frontend.c
94303 @@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
94304 #ifdef CONFIG_IP_ROUTE_MULTIPATH
94305 fib_sync_up(dev);
94306 #endif
94307 - atomic_inc(&net->ipv4.dev_addr_genid);
94308 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
94309 rt_cache_flush(dev_net(dev));
94310 break;
94311 case NETDEV_DOWN:
94312 fib_del_ifaddr(ifa, NULL);
94313 - atomic_inc(&net->ipv4.dev_addr_genid);
94314 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
94315 if (ifa->ifa_dev->ifa_list == NULL) {
94316 /* Last address was deleted from this interface.
94317 * Disable IP.
94318 @@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
94319 #ifdef CONFIG_IP_ROUTE_MULTIPATH
94320 fib_sync_up(dev);
94321 #endif
94322 - atomic_inc(&net->ipv4.dev_addr_genid);
94323 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
94324 rt_cache_flush(net);
94325 break;
94326 case NETDEV_DOWN:
94327 diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
94328 index d5dbca5..6251d5f 100644
94329 --- a/net/ipv4/fib_semantics.c
94330 +++ b/net/ipv4/fib_semantics.c
94331 @@ -766,7 +766,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
94332 nh->nh_saddr = inet_select_addr(nh->nh_dev,
94333 nh->nh_gw,
94334 nh->nh_parent->fib_scope);
94335 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
94336 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
94337
94338 return nh->nh_saddr;
94339 }
94340 diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
94341 index 6acb541..9ea617d 100644
94342 --- a/net/ipv4/inet_connection_sock.c
94343 +++ b/net/ipv4/inet_connection_sock.c
94344 @@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
94345 .range = { 32768, 61000 },
94346 };
94347
94348 -unsigned long *sysctl_local_reserved_ports;
94349 +unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
94350 EXPORT_SYMBOL(sysctl_local_reserved_ports);
94351
94352 void inet_get_local_port_range(int *low, int *high)
94353 diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
94354 index 5f64875..31cf54d 100644
94355 --- a/net/ipv4/inet_diag.c
94356 +++ b/net/ipv4/inet_diag.c
94357 @@ -106,6 +106,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
94358
94359 r->id.idiag_sport = inet->inet_sport;
94360 r->id.idiag_dport = inet->inet_dport;
94361 +
94362 + memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
94363 + memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
94364 +
94365 r->id.idiag_src[0] = inet->inet_rcv_saddr;
94366 r->id.idiag_dst[0] = inet->inet_daddr;
94367
94368 @@ -240,12 +244,19 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
94369
94370 r->idiag_family = tw->tw_family;
94371 r->idiag_retrans = 0;
94372 +
94373 r->id.idiag_if = tw->tw_bound_dev_if;
94374 sock_diag_save_cookie(tw, r->id.idiag_cookie);
94375 +
94376 r->id.idiag_sport = tw->tw_sport;
94377 r->id.idiag_dport = tw->tw_dport;
94378 +
94379 + memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
94380 + memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
94381 +
94382 r->id.idiag_src[0] = tw->tw_rcv_saddr;
94383 r->id.idiag_dst[0] = tw->tw_daddr;
94384 +
94385 r->idiag_state = tw->tw_substate;
94386 r->idiag_timer = 3;
94387 r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
94388 @@ -732,8 +743,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
94389
94390 r->id.idiag_sport = inet->inet_sport;
94391 r->id.idiag_dport = ireq->rmt_port;
94392 +
94393 + memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
94394 + memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
94395 +
94396 r->id.idiag_src[0] = ireq->loc_addr;
94397 r->id.idiag_dst[0] = ireq->rmt_addr;
94398 +
94399 r->idiag_expires = jiffies_to_msecs(tmo);
94400 r->idiag_rqueue = 0;
94401 r->idiag_wqueue = 0;
94402 diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
94403 index 96da9c7..b956690 100644
94404 --- a/net/ipv4/inet_hashtables.c
94405 +++ b/net/ipv4/inet_hashtables.c
94406 @@ -18,12 +18,15 @@
94407 #include <linux/sched.h>
94408 #include <linux/slab.h>
94409 #include <linux/wait.h>
94410 +#include <linux/security.h>
94411
94412 #include <net/inet_connection_sock.h>
94413 #include <net/inet_hashtables.h>
94414 #include <net/secure_seq.h>
94415 #include <net/ip.h>
94416
94417 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
94418 +
94419 /*
94420 * Allocate and initialize a new local port bind bucket.
94421 * The bindhash mutex for snum's hash chain must be held here.
94422 @@ -554,6 +557,8 @@ ok:
94423 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
94424 spin_unlock(&head->lock);
94425
94426 + gr_update_task_in_ip_table(current, inet_sk(sk));
94427 +
94428 if (tw) {
94429 inet_twsk_deschedule(tw, death_row);
94430 while (twrefcnt) {
94431 diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
94432 index 33d5537..da337a4 100644
94433 --- a/net/ipv4/inetpeer.c
94434 +++ b/net/ipv4/inetpeer.c
94435 @@ -503,8 +503,8 @@ relookup:
94436 if (p) {
94437 p->daddr = *daddr;
94438 atomic_set(&p->refcnt, 1);
94439 - atomic_set(&p->rid, 0);
94440 - atomic_set(&p->ip_id_count,
94441 + atomic_set_unchecked(&p->rid, 0);
94442 + atomic_set_unchecked(&p->ip_id_count,
94443 (daddr->family == AF_INET) ?
94444 secure_ip_id(daddr->addr.a4) :
94445 secure_ipv6_id(daddr->addr.a6));
94446 diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
94447 index b66910a..cfe416e 100644
94448 --- a/net/ipv4/ip_fragment.c
94449 +++ b/net/ipv4/ip_fragment.c
94450 @@ -282,7 +282,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
94451 return 0;
94452
94453 start = qp->rid;
94454 - end = atomic_inc_return(&peer->rid);
94455 + end = atomic_inc_return_unchecked(&peer->rid);
94456 qp->rid = end;
94457
94458 rc = qp->q.fragments && (end - start) > max;
94459 @@ -759,12 +759,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
94460
94461 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
94462 {
94463 - struct ctl_table *table;
94464 + ctl_table_no_const *table = NULL;
94465 struct ctl_table_header *hdr;
94466
94467 - table = ip4_frags_ns_ctl_table;
94468 if (!net_eq(net, &init_net)) {
94469 - table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
94470 + table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
94471 if (table == NULL)
94472 goto err_alloc;
94473
94474 @@ -775,9 +774,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
94475 /* Don't export sysctls to unprivileged users */
94476 if (net->user_ns != &init_user_ns)
94477 table[0].procname = NULL;
94478 - }
94479 + hdr = register_net_sysctl(net, "net/ipv4", table);
94480 + } else
94481 + hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
94482
94483 - hdr = register_net_sysctl(net, "net/ipv4", table);
94484 if (hdr == NULL)
94485 goto err_reg;
94486
94487 @@ -785,8 +785,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
94488 return 0;
94489
94490 err_reg:
94491 - if (!net_eq(net, &init_net))
94492 - kfree(table);
94493 + kfree(table);
94494 err_alloc:
94495 return -ENOMEM;
94496 }
94497 diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
94498 index d7aea4c..a8ee872 100644
94499 --- a/net/ipv4/ip_gre.c
94500 +++ b/net/ipv4/ip_gre.c
94501 @@ -115,7 +115,7 @@ static bool log_ecn_error = true;
94502 module_param(log_ecn_error, bool, 0644);
94503 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
94504
94505 -static struct rtnl_link_ops ipgre_link_ops __read_mostly;
94506 +static struct rtnl_link_ops ipgre_link_ops;
94507 static int ipgre_tunnel_init(struct net_device *dev);
94508
94509 static int ipgre_net_id __read_mostly;
94510 @@ -731,7 +731,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
94511 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
94512 };
94513
94514 -static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
94515 +static struct rtnl_link_ops ipgre_link_ops = {
94516 .kind = "gre",
94517 .maxtype = IFLA_GRE_MAX,
94518 .policy = ipgre_policy,
94519 @@ -745,7 +745,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
94520 .fill_info = ipgre_fill_info,
94521 };
94522
94523 -static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
94524 +static struct rtnl_link_ops ipgre_tap_ops = {
94525 .kind = "gretap",
94526 .maxtype = IFLA_GRE_MAX,
94527 .policy = ipgre_policy,
94528 diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
94529 index 23e6ab0..be67a57 100644
94530 --- a/net/ipv4/ip_sockglue.c
94531 +++ b/net/ipv4/ip_sockglue.c
94532 @@ -1153,7 +1153,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
94533 len = min_t(unsigned int, len, opt->optlen);
94534 if (put_user(len, optlen))
94535 return -EFAULT;
94536 - if (copy_to_user(optval, opt->__data, len))
94537 + if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
94538 + copy_to_user(optval, opt->__data, len))
94539 return -EFAULT;
94540 return 0;
94541 }
94542 @@ -1284,7 +1285,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
94543 if (sk->sk_type != SOCK_STREAM)
94544 return -ENOPROTOOPT;
94545
94546 - msg.msg_control = optval;
94547 + msg.msg_control = (void __force_kernel *)optval;
94548 msg.msg_controllen = len;
94549 msg.msg_flags = flags;
94550
94551 diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
94552 index 26847e1..75d2d2f 100644
94553 --- a/net/ipv4/ip_vti.c
94554 +++ b/net/ipv4/ip_vti.c
94555 @@ -44,7 +44,7 @@
94556 #include <net/net_namespace.h>
94557 #include <net/netns/generic.h>
94558
94559 -static struct rtnl_link_ops vti_link_ops __read_mostly;
94560 +static struct rtnl_link_ops vti_link_ops;
94561
94562 static int vti_net_id __read_mostly;
94563 static int vti_tunnel_init(struct net_device *dev);
94564 @@ -425,7 +425,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
94565 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
94566 };
94567
94568 -static struct rtnl_link_ops vti_link_ops __read_mostly = {
94569 +static struct rtnl_link_ops vti_link_ops = {
94570 .kind = "vti",
94571 .maxtype = IFLA_VTI_MAX,
94572 .policy = vti_policy,
94573 diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
94574 index efa1138..20dbba0 100644
94575 --- a/net/ipv4/ipconfig.c
94576 +++ b/net/ipv4/ipconfig.c
94577 @@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
94578
94579 mm_segment_t oldfs = get_fs();
94580 set_fs(get_ds());
94581 - res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
94582 + res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
94583 set_fs(oldfs);
94584 return res;
94585 }
94586 @@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
94587
94588 mm_segment_t oldfs = get_fs();
94589 set_fs(get_ds());
94590 - res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
94591 + res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
94592 set_fs(oldfs);
94593 return res;
94594 }
94595 @@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
94596
94597 mm_segment_t oldfs = get_fs();
94598 set_fs(get_ds());
94599 - res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
94600 + res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
94601 set_fs(oldfs);
94602 return res;
94603 }
94604 diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
94605 index 7f80fb4..b0328f6 100644
94606 --- a/net/ipv4/ipip.c
94607 +++ b/net/ipv4/ipip.c
94608 @@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
94609 static int ipip_net_id __read_mostly;
94610
94611 static int ipip_tunnel_init(struct net_device *dev);
94612 -static struct rtnl_link_ops ipip_link_ops __read_mostly;
94613 +static struct rtnl_link_ops ipip_link_ops;
94614
94615 static int ipip_err(struct sk_buff *skb, u32 info)
94616 {
94617 @@ -408,7 +408,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
94618 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
94619 };
94620
94621 -static struct rtnl_link_ops ipip_link_ops __read_mostly = {
94622 +static struct rtnl_link_ops ipip_link_ops = {
94623 .kind = "ipip",
94624 .maxtype = IFLA_IPTUN_MAX,
94625 .policy = ipip_policy,
94626 diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
94627 index 85a4f21..1beb1f5 100644
94628 --- a/net/ipv4/netfilter/arp_tables.c
94629 +++ b/net/ipv4/netfilter/arp_tables.c
94630 @@ -880,14 +880,14 @@ static int compat_table_info(const struct xt_table_info *info,
94631 #endif
94632
94633 static int get_info(struct net *net, void __user *user,
94634 - const int *len, int compat)
94635 + int len, int compat)
94636 {
94637 char name[XT_TABLE_MAXNAMELEN];
94638 struct xt_table *t;
94639 int ret;
94640
94641 - if (*len != sizeof(struct arpt_getinfo)) {
94642 - duprintf("length %u != %Zu\n", *len,
94643 + if (len != sizeof(struct arpt_getinfo)) {
94644 + duprintf("length %u != %Zu\n", len,
94645 sizeof(struct arpt_getinfo));
94646 return -EINVAL;
94647 }
94648 @@ -924,7 +924,7 @@ static int get_info(struct net *net, void __user *user,
94649 info.size = private->size;
94650 strcpy(info.name, name);
94651
94652 - if (copy_to_user(user, &info, *len) != 0)
94653 + if (copy_to_user(user, &info, len) != 0)
94654 ret = -EFAULT;
94655 else
94656 ret = 0;
94657 @@ -1683,7 +1683,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
94658
94659 switch (cmd) {
94660 case ARPT_SO_GET_INFO:
94661 - ret = get_info(sock_net(sk), user, len, 1);
94662 + ret = get_info(sock_net(sk), user, *len, 1);
94663 break;
94664 case ARPT_SO_GET_ENTRIES:
94665 ret = compat_get_entries(sock_net(sk), user, len);
94666 @@ -1728,7 +1728,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
94667
94668 switch (cmd) {
94669 case ARPT_SO_GET_INFO:
94670 - ret = get_info(sock_net(sk), user, len, 0);
94671 + ret = get_info(sock_net(sk), user, *len, 0);
94672 break;
94673
94674 case ARPT_SO_GET_ENTRIES:
94675 diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
94676 index d23118d..6ad7277 100644
94677 --- a/net/ipv4/netfilter/ip_tables.c
94678 +++ b/net/ipv4/netfilter/ip_tables.c
94679 @@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
94680 #endif
94681
94682 static int get_info(struct net *net, void __user *user,
94683 - const int *len, int compat)
94684 + int len, int compat)
94685 {
94686 char name[XT_TABLE_MAXNAMELEN];
94687 struct xt_table *t;
94688 int ret;
94689
94690 - if (*len != sizeof(struct ipt_getinfo)) {
94691 - duprintf("length %u != %zu\n", *len,
94692 + if (len != sizeof(struct ipt_getinfo)) {
94693 + duprintf("length %u != %zu\n", len,
94694 sizeof(struct ipt_getinfo));
94695 return -EINVAL;
94696 }
94697 @@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
94698 info.size = private->size;
94699 strcpy(info.name, name);
94700
94701 - if (copy_to_user(user, &info, *len) != 0)
94702 + if (copy_to_user(user, &info, len) != 0)
94703 ret = -EFAULT;
94704 else
94705 ret = 0;
94706 @@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
94707
94708 switch (cmd) {
94709 case IPT_SO_GET_INFO:
94710 - ret = get_info(sock_net(sk), user, len, 1);
94711 + ret = get_info(sock_net(sk), user, *len, 1);
94712 break;
94713 case IPT_SO_GET_ENTRIES:
94714 ret = compat_get_entries(sock_net(sk), user, len);
94715 @@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
94716
94717 switch (cmd) {
94718 case IPT_SO_GET_INFO:
94719 - ret = get_info(sock_net(sk), user, len, 0);
94720 + ret = get_info(sock_net(sk), user, *len, 0);
94721 break;
94722
94723 case IPT_SO_GET_ENTRIES:
94724 diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
94725 index cbc2215..9cb993c 100644
94726 --- a/net/ipv4/netfilter/ipt_ULOG.c
94727 +++ b/net/ipv4/netfilter/ipt_ULOG.c
94728 @@ -220,6 +220,7 @@ static void ipt_ulog_packet(struct net *net,
94729 ub->qlen++;
94730
94731 pm = nlmsg_data(nlh);
94732 + memset(pm, 0, sizeof(*pm));
94733
94734 /* We might not have a timestamp, get one */
94735 if (skb->tstamp.tv64 == 0)
94736 @@ -238,8 +239,6 @@ static void ipt_ulog_packet(struct net *net,
94737 }
94738 else if (loginfo->prefix[0] != '\0')
94739 strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
94740 - else
94741 - *(pm->prefix) = '\0';
94742
94743 if (in && in->hard_header_len > 0 &&
94744 skb->mac_header != skb->network_header &&
94745 @@ -251,13 +250,9 @@ static void ipt_ulog_packet(struct net *net,
94746
94747 if (in)
94748 strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
94749 - else
94750 - pm->indev_name[0] = '\0';
94751
94752 if (out)
94753 strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
94754 - else
94755 - pm->outdev_name[0] = '\0';
94756
94757 /* copy_len <= skb->len, so can't fail. */
94758 if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
94759 diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
94760 index c482f7c..2784262 100644
94761 --- a/net/ipv4/ping.c
94762 +++ b/net/ipv4/ping.c
94763 @@ -55,7 +55,7 @@
94764
94765
94766 struct ping_table ping_table;
94767 -struct pingv6_ops pingv6_ops;
94768 +struct pingv6_ops *pingv6_ops;
94769 EXPORT_SYMBOL_GPL(pingv6_ops);
94770
94771 static u16 ping_port_rover;
94772 @@ -335,7 +335,7 @@ static int ping_check_bind_addr(struct sock *sk, struct inet_sock *isk,
94773 return -ENODEV;
94774 }
94775 }
94776 - has_addr = pingv6_ops.ipv6_chk_addr(net, &addr->sin6_addr, dev,
94777 + has_addr = pingv6_ops->ipv6_chk_addr(net, &addr->sin6_addr, dev,
94778 scoped);
94779 rcu_read_unlock();
94780
94781 @@ -541,7 +541,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
94782 }
94783 #if IS_ENABLED(CONFIG_IPV6)
94784 } else if (skb->protocol == htons(ETH_P_IPV6)) {
94785 - harderr = pingv6_ops.icmpv6_err_convert(type, code, &err);
94786 + harderr = pingv6_ops->icmpv6_err_convert(type, code, &err);
94787 #endif
94788 }
94789
94790 @@ -559,7 +559,7 @@ void ping_err(struct sk_buff *skb, int offset, u32 info)
94791 info, (u8 *)icmph);
94792 #if IS_ENABLED(CONFIG_IPV6)
94793 } else if (family == AF_INET6) {
94794 - pingv6_ops.ipv6_icmp_error(sk, skb, err, 0,
94795 + pingv6_ops->ipv6_icmp_error(sk, skb, err, 0,
94796 info, (u8 *)icmph);
94797 #endif
94798 }
94799 @@ -841,7 +841,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
94800 return ip_recv_error(sk, msg, len, addr_len);
94801 #if IS_ENABLED(CONFIG_IPV6)
94802 } else if (family == AF_INET6) {
94803 - return pingv6_ops.ipv6_recv_error(sk, msg, len,
94804 + return pingv6_ops->ipv6_recv_error(sk, msg, len,
94805 addr_len);
94806 #endif
94807 }
94808 @@ -900,7 +900,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
94809 }
94810
94811 if (inet6_sk(sk)->rxopt.all)
94812 - pingv6_ops.ip6_datagram_recv_ctl(sk, msg, skb);
94813 + pingv6_ops->ip6_datagram_recv_ctl(sk, msg, skb);
94814 #endif
94815 } else {
94816 BUG();
94817 @@ -1090,7 +1090,7 @@ static void ping_v4_format_sock(struct sock *sp, struct seq_file *f,
94818 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
94819 0, sock_i_ino(sp),
94820 atomic_read(&sp->sk_refcnt), sp,
94821 - atomic_read(&sp->sk_drops), len);
94822 + atomic_read_unchecked(&sp->sk_drops), len);
94823 }
94824
94825 static int ping_v4_seq_show(struct seq_file *seq, void *v)
94826 diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
94827 index 7d3db78..9fd511d 100644
94828 --- a/net/ipv4/raw.c
94829 +++ b/net/ipv4/raw.c
94830 @@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
94831 int raw_rcv(struct sock *sk, struct sk_buff *skb)
94832 {
94833 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
94834 - atomic_inc(&sk->sk_drops);
94835 + atomic_inc_unchecked(&sk->sk_drops);
94836 kfree_skb(skb);
94837 return NET_RX_DROP;
94838 }
94839 @@ -746,16 +746,20 @@ static int raw_init(struct sock *sk)
94840
94841 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
94842 {
94843 + struct icmp_filter filter;
94844 +
94845 if (optlen > sizeof(struct icmp_filter))
94846 optlen = sizeof(struct icmp_filter);
94847 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
94848 + if (copy_from_user(&filter, optval, optlen))
94849 return -EFAULT;
94850 + raw_sk(sk)->filter = filter;
94851 return 0;
94852 }
94853
94854 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
94855 {
94856 int len, ret = -EFAULT;
94857 + struct icmp_filter filter;
94858
94859 if (get_user(len, optlen))
94860 goto out;
94861 @@ -765,8 +769,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
94862 if (len > sizeof(struct icmp_filter))
94863 len = sizeof(struct icmp_filter);
94864 ret = -EFAULT;
94865 - if (put_user(len, optlen) ||
94866 - copy_to_user(optval, &raw_sk(sk)->filter, len))
94867 + filter = raw_sk(sk)->filter;
94868 + if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
94869 goto out;
94870 ret = 0;
94871 out: return ret;
94872 @@ -995,7 +999,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
94873 0, 0L, 0,
94874 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
94875 0, sock_i_ino(sp),
94876 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
94877 + atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
94878 }
94879
94880 static int raw_seq_show(struct seq_file *seq, void *v)
94881 diff --git a/net/ipv4/route.c b/net/ipv4/route.c
94882 index 62290b5..f0d944f 100644
94883 --- a/net/ipv4/route.c
94884 +++ b/net/ipv4/route.c
94885 @@ -2617,34 +2617,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
94886 .maxlen = sizeof(int),
94887 .mode = 0200,
94888 .proc_handler = ipv4_sysctl_rtcache_flush,
94889 + .extra1 = &init_net,
94890 },
94891 { },
94892 };
94893
94894 static __net_init int sysctl_route_net_init(struct net *net)
94895 {
94896 - struct ctl_table *tbl;
94897 + ctl_table_no_const *tbl = NULL;
94898
94899 - tbl = ipv4_route_flush_table;
94900 if (!net_eq(net, &init_net)) {
94901 - tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
94902 + tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
94903 if (tbl == NULL)
94904 goto err_dup;
94905
94906 /* Don't export sysctls to unprivileged users */
94907 if (net->user_ns != &init_user_ns)
94908 tbl[0].procname = NULL;
94909 - }
94910 - tbl[0].extra1 = net;
94911 + tbl[0].extra1 = net;
94912 + net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
94913 + } else
94914 + net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
94915
94916 - net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
94917 if (net->ipv4.route_hdr == NULL)
94918 goto err_reg;
94919 return 0;
94920
94921 err_reg:
94922 - if (tbl != ipv4_route_flush_table)
94923 - kfree(tbl);
94924 + kfree(tbl);
94925 err_dup:
94926 return -ENOMEM;
94927 }
94928 @@ -2667,8 +2667,8 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
94929
94930 static __net_init int rt_genid_init(struct net *net)
94931 {
94932 - atomic_set(&net->ipv4.rt_genid, 0);
94933 - atomic_set(&net->fnhe_genid, 0);
94934 + atomic_set_unchecked(&net->ipv4.rt_genid, 0);
94935 + atomic_set_unchecked(&net->fnhe_genid, 0);
94936 get_random_bytes(&net->ipv4.dev_addr_genid,
94937 sizeof(net->ipv4.dev_addr_genid));
94938 return 0;
94939 diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
94940 index 540279f..9855b16 100644
94941 --- a/net/ipv4/sysctl_net_ipv4.c
94942 +++ b/net/ipv4/sysctl_net_ipv4.c
94943 @@ -58,7 +58,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
94944 {
94945 int ret;
94946 int range[2];
94947 - struct ctl_table tmp = {
94948 + ctl_table_no_const tmp = {
94949 .data = &range,
94950 .maxlen = sizeof(range),
94951 .mode = table->mode,
94952 @@ -111,7 +111,7 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
94953 int ret;
94954 gid_t urange[2];
94955 kgid_t low, high;
94956 - struct ctl_table tmp = {
94957 + ctl_table_no_const tmp = {
94958 .data = &urange,
94959 .maxlen = sizeof(urange),
94960 .mode = table->mode,
94961 @@ -142,7 +142,7 @@ static int proc_tcp_congestion_control(struct ctl_table *ctl, int write,
94962 void __user *buffer, size_t *lenp, loff_t *ppos)
94963 {
94964 char val[TCP_CA_NAME_MAX];
94965 - struct ctl_table tbl = {
94966 + ctl_table_no_const tbl = {
94967 .data = val,
94968 .maxlen = TCP_CA_NAME_MAX,
94969 };
94970 @@ -161,7 +161,7 @@ static int proc_tcp_available_congestion_control(struct ctl_table *ctl,
94971 void __user *buffer, size_t *lenp,
94972 loff_t *ppos)
94973 {
94974 - struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
94975 + ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
94976 int ret;
94977
94978 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
94979 @@ -178,7 +178,7 @@ static int proc_allowed_congestion_control(struct ctl_table *ctl,
94980 void __user *buffer, size_t *lenp,
94981 loff_t *ppos)
94982 {
94983 - struct ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
94984 + ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
94985 int ret;
94986
94987 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
94988 @@ -204,15 +204,17 @@ static int ipv4_tcp_mem(struct ctl_table *ctl, int write,
94989 struct mem_cgroup *memcg;
94990 #endif
94991
94992 - struct ctl_table tmp = {
94993 + ctl_table_no_const tmp = {
94994 .data = &vec,
94995 .maxlen = sizeof(vec),
94996 .mode = ctl->mode,
94997 };
94998
94999 if (!write) {
95000 - ctl->data = &net->ipv4.sysctl_tcp_mem;
95001 - return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
95002 + ctl_table_no_const tcp_mem = *ctl;
95003 +
95004 + tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
95005 + return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
95006 }
95007
95008 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
95009 @@ -240,7 +242,7 @@ static int proc_tcp_fastopen_key(struct ctl_table *ctl, int write,
95010 void __user *buffer, size_t *lenp,
95011 loff_t *ppos)
95012 {
95013 - struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
95014 + ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
95015 struct tcp_fastopen_context *ctxt;
95016 int ret;
95017 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
95018 @@ -483,7 +485,7 @@ static struct ctl_table ipv4_table[] = {
95019 },
95020 {
95021 .procname = "ip_local_reserved_ports",
95022 - .data = NULL, /* initialized in sysctl_ipv4_init */
95023 + .data = sysctl_local_reserved_ports,
95024 .maxlen = 65536,
95025 .mode = 0644,
95026 .proc_handler = proc_do_large_bitmap,
95027 @@ -864,11 +866,10 @@ static struct ctl_table ipv4_net_table[] = {
95028
95029 static __net_init int ipv4_sysctl_init_net(struct net *net)
95030 {
95031 - struct ctl_table *table;
95032 + ctl_table_no_const *table = NULL;
95033
95034 - table = ipv4_net_table;
95035 if (!net_eq(net, &init_net)) {
95036 - table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
95037 + table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
95038 if (table == NULL)
95039 goto err_alloc;
95040
95041 @@ -903,15 +904,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
95042
95043 tcp_init_mem(net);
95044
95045 - net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
95046 + if (!net_eq(net, &init_net))
95047 + net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
95048 + else
95049 + net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
95050 if (net->ipv4.ipv4_hdr == NULL)
95051 goto err_reg;
95052
95053 return 0;
95054
95055 err_reg:
95056 - if (!net_eq(net, &init_net))
95057 - kfree(table);
95058 + kfree(table);
95059 err_alloc:
95060 return -ENOMEM;
95061 }
95062 @@ -933,16 +936,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
95063 static __init int sysctl_ipv4_init(void)
95064 {
95065 struct ctl_table_header *hdr;
95066 - struct ctl_table *i;
95067 -
95068 - for (i = ipv4_table; i->procname; i++) {
95069 - if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
95070 - i->data = sysctl_local_reserved_ports;
95071 - break;
95072 - }
95073 - }
95074 - if (!i->procname)
95075 - return -EINVAL;
95076
95077 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
95078 if (hdr == NULL)
95079 diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
95080 index 068c8fb..a755c52 100644
95081 --- a/net/ipv4/tcp_input.c
95082 +++ b/net/ipv4/tcp_input.c
95083 @@ -4435,7 +4435,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
95084 * simplifies code)
95085 */
95086 static void
95087 -tcp_collapse(struct sock *sk, struct sk_buff_head *list,
95088 +__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
95089 struct sk_buff *head, struct sk_buff *tail,
95090 u32 start, u32 end)
95091 {
95092 @@ -5520,6 +5520,7 @@ discard:
95093 tcp_paws_reject(&tp->rx_opt, 0))
95094 goto discard_and_undo;
95095
95096 +#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
95097 if (th->syn) {
95098 /* We see SYN without ACK. It is attempt of
95099 * simultaneous connect with crossed SYNs.
95100 @@ -5570,6 +5571,7 @@ discard:
95101 goto discard;
95102 #endif
95103 }
95104 +#endif
95105 /* "fifth, if neither of the SYN or RST bits is set then
95106 * drop the segment and return."
95107 */
95108 @@ -5616,7 +5618,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
95109 goto discard;
95110
95111 if (th->syn) {
95112 - if (th->fin)
95113 + if (th->fin || th->urg || th->psh)
95114 goto discard;
95115 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
95116 return 1;
95117 diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
95118 index 5031f68..91569e2 100644
95119 --- a/net/ipv4/tcp_ipv4.c
95120 +++ b/net/ipv4/tcp_ipv4.c
95121 @@ -91,6 +91,10 @@ int sysctl_tcp_low_latency __read_mostly;
95122 EXPORT_SYMBOL(sysctl_tcp_low_latency);
95123
95124
95125 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95126 +extern int grsec_enable_blackhole;
95127 +#endif
95128 +
95129 #ifdef CONFIG_TCP_MD5SIG
95130 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
95131 __be32 daddr, __be32 saddr, const struct tcphdr *th);
95132 @@ -1829,6 +1833,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
95133 return 0;
95134
95135 reset:
95136 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95137 + if (!grsec_enable_blackhole)
95138 +#endif
95139 tcp_v4_send_reset(rsk, skb);
95140 discard:
95141 kfree_skb(skb);
95142 @@ -1974,12 +1981,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
95143 TCP_SKB_CB(skb)->sacked = 0;
95144
95145 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
95146 - if (!sk)
95147 + if (!sk) {
95148 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95149 + ret = 1;
95150 +#endif
95151 goto no_tcp_socket;
95152 -
95153 + }
95154 process:
95155 - if (sk->sk_state == TCP_TIME_WAIT)
95156 + if (sk->sk_state == TCP_TIME_WAIT) {
95157 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95158 + ret = 2;
95159 +#endif
95160 goto do_time_wait;
95161 + }
95162
95163 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
95164 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
95165 @@ -2033,6 +2047,10 @@ csum_error:
95166 bad_packet:
95167 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
95168 } else {
95169 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95170 + if (!grsec_enable_blackhole || (ret == 1 &&
95171 + (skb->dev->flags & IFF_LOOPBACK)))
95172 +#endif
95173 tcp_v4_send_reset(NULL, skb);
95174 }
95175
95176 diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
95177 index 58a3e69..7f0626e 100644
95178 --- a/net/ipv4/tcp_minisocks.c
95179 +++ b/net/ipv4/tcp_minisocks.c
95180 @@ -27,6 +27,10 @@
95181 #include <net/inet_common.h>
95182 #include <net/xfrm.h>
95183
95184 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95185 +extern int grsec_enable_blackhole;
95186 +#endif
95187 +
95188 int sysctl_tcp_syncookies __read_mostly = 1;
95189 EXPORT_SYMBOL(sysctl_tcp_syncookies);
95190
95191 @@ -711,7 +715,10 @@ embryonic_reset:
95192 * avoid becoming vulnerable to outside attack aiming at
95193 * resetting legit local connections.
95194 */
95195 - req->rsk_ops->send_reset(sk, skb);
95196 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95197 + if (!grsec_enable_blackhole)
95198 +#endif
95199 + req->rsk_ops->send_reset(sk, skb);
95200 } else if (fastopen) { /* received a valid RST pkt */
95201 reqsk_fastopen_remove(sk, req, true);
95202 tcp_reset(sk);
95203 diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
95204 index 611beab..c4b6e1d 100644
95205 --- a/net/ipv4/tcp_probe.c
95206 +++ b/net/ipv4/tcp_probe.c
95207 @@ -245,7 +245,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
95208 if (cnt + width >= len)
95209 break;
95210
95211 - if (copy_to_user(buf + cnt, tbuf, width))
95212 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
95213 return -EFAULT;
95214 cnt += width;
95215 }
95216 diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
95217 index 4b85e6f..22f9ac9 100644
95218 --- a/net/ipv4/tcp_timer.c
95219 +++ b/net/ipv4/tcp_timer.c
95220 @@ -22,6 +22,10 @@
95221 #include <linux/gfp.h>
95222 #include <net/tcp.h>
95223
95224 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95225 +extern int grsec_lastack_retries;
95226 +#endif
95227 +
95228 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
95229 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
95230 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
95231 @@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
95232 }
95233 }
95234
95235 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95236 + if ((sk->sk_state == TCP_LAST_ACK) &&
95237 + (grsec_lastack_retries > 0) &&
95238 + (grsec_lastack_retries < retry_until))
95239 + retry_until = grsec_lastack_retries;
95240 +#endif
95241 +
95242 if (retransmits_timed_out(sk, retry_until,
95243 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
95244 /* Has it gone just too far? */
95245 diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
95246 index 5e2c2f1..6473c22 100644
95247 --- a/net/ipv4/udp.c
95248 +++ b/net/ipv4/udp.c
95249 @@ -87,6 +87,7 @@
95250 #include <linux/types.h>
95251 #include <linux/fcntl.h>
95252 #include <linux/module.h>
95253 +#include <linux/security.h>
95254 #include <linux/socket.h>
95255 #include <linux/sockios.h>
95256 #include <linux/igmp.h>
95257 @@ -112,6 +113,10 @@
95258 #include <net/busy_poll.h>
95259 #include "udp_impl.h"
95260
95261 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95262 +extern int grsec_enable_blackhole;
95263 +#endif
95264 +
95265 struct udp_table udp_table __read_mostly;
95266 EXPORT_SYMBOL(udp_table);
95267
95268 @@ -595,6 +600,9 @@ found:
95269 return s;
95270 }
95271
95272 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
95273 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
95274 +
95275 /*
95276 * This routine is called by the ICMP module when it gets some
95277 * sort of error condition. If err < 0 then the socket should
95278 @@ -892,9 +900,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
95279 dport = usin->sin_port;
95280 if (dport == 0)
95281 return -EINVAL;
95282 +
95283 + err = gr_search_udp_sendmsg(sk, usin);
95284 + if (err)
95285 + return err;
95286 } else {
95287 if (sk->sk_state != TCP_ESTABLISHED)
95288 return -EDESTADDRREQ;
95289 +
95290 + err = gr_search_udp_sendmsg(sk, NULL);
95291 + if (err)
95292 + return err;
95293 +
95294 daddr = inet->inet_daddr;
95295 dport = inet->inet_dport;
95296 /* Open fast path for connected socket.
95297 @@ -1141,7 +1158,7 @@ static unsigned int first_packet_length(struct sock *sk)
95298 IS_UDPLITE(sk));
95299 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
95300 IS_UDPLITE(sk));
95301 - atomic_inc(&sk->sk_drops);
95302 + atomic_inc_unchecked(&sk->sk_drops);
95303 __skb_unlink(skb, rcvq);
95304 __skb_queue_tail(&list_kill, skb);
95305 }
95306 @@ -1221,6 +1238,10 @@ try_again:
95307 if (!skb)
95308 goto out;
95309
95310 + err = gr_search_udp_recvmsg(sk, skb);
95311 + if (err)
95312 + goto out_free;
95313 +
95314 ulen = skb->len - sizeof(struct udphdr);
95315 copied = len;
95316 if (copied > ulen)
95317 @@ -1254,7 +1275,7 @@ try_again:
95318 if (unlikely(err)) {
95319 trace_kfree_skb(skb, udp_recvmsg);
95320 if (!peeked) {
95321 - atomic_inc(&sk->sk_drops);
95322 + atomic_inc_unchecked(&sk->sk_drops);
95323 UDP_INC_STATS_USER(sock_net(sk),
95324 UDP_MIB_INERRORS, is_udplite);
95325 }
95326 @@ -1542,7 +1563,7 @@ csum_error:
95327 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
95328 drop:
95329 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
95330 - atomic_inc(&sk->sk_drops);
95331 + atomic_inc_unchecked(&sk->sk_drops);
95332 kfree_skb(skb);
95333 return -1;
95334 }
95335 @@ -1561,7 +1582,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
95336 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
95337
95338 if (!skb1) {
95339 - atomic_inc(&sk->sk_drops);
95340 + atomic_inc_unchecked(&sk->sk_drops);
95341 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
95342 IS_UDPLITE(sk));
95343 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
95344 @@ -1733,6 +1754,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
95345 goto csum_error;
95346
95347 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
95348 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95349 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
95350 +#endif
95351 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
95352
95353 /*
95354 @@ -2165,7 +2189,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
95355 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
95356 0, sock_i_ino(sp),
95357 atomic_read(&sp->sk_refcnt), sp,
95358 - atomic_read(&sp->sk_drops), len);
95359 + atomic_read_unchecked(&sp->sk_drops), len);
95360 }
95361
95362 int udp4_seq_show(struct seq_file *seq, void *v)
95363 diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
95364 index adf9983..8e45d0a 100644
95365 --- a/net/ipv4/xfrm4_policy.c
95366 +++ b/net/ipv4/xfrm4_policy.c
95367 @@ -186,11 +186,11 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
95368 fl4->flowi4_tos = iph->tos;
95369 }
95370
95371 -static inline int xfrm4_garbage_collect(struct dst_ops *ops)
95372 +static int xfrm4_garbage_collect(struct dst_ops *ops)
95373 {
95374 struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);
95375
95376 - xfrm4_policy_afinfo.garbage_collect(net);
95377 + xfrm_garbage_collect_deferred(net);
95378 return (dst_entries_get_slow(ops) > ops->gc_thresh * 2);
95379 }
95380
95381 @@ -269,19 +269,18 @@ static struct ctl_table xfrm4_policy_table[] = {
95382
95383 static int __net_init xfrm4_net_init(struct net *net)
95384 {
95385 - struct ctl_table *table;
95386 + ctl_table_no_const *table = NULL;
95387 struct ctl_table_header *hdr;
95388
95389 - table = xfrm4_policy_table;
95390 if (!net_eq(net, &init_net)) {
95391 - table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
95392 + table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
95393 if (!table)
95394 goto err_alloc;
95395
95396 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
95397 - }
95398 -
95399 - hdr = register_net_sysctl(net, "net/ipv4", table);
95400 + hdr = register_net_sysctl(net, "net/ipv4", table);
95401 + } else
95402 + hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
95403 if (!hdr)
95404 goto err_reg;
95405
95406 @@ -289,8 +288,7 @@ static int __net_init xfrm4_net_init(struct net *net)
95407 return 0;
95408
95409 err_reg:
95410 - if (!net_eq(net, &init_net))
95411 - kfree(table);
95412 + kfree(table);
95413 err_alloc:
95414 return -ENOMEM;
95415 }
95416 diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
95417 index cd3fb30..b7dfef7 100644
95418 --- a/net/ipv6/addrconf.c
95419 +++ b/net/ipv6/addrconf.c
95420 @@ -586,7 +586,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
95421 idx = 0;
95422 head = &net->dev_index_head[h];
95423 rcu_read_lock();
95424 - cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
95425 + cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
95426 net->dev_base_seq;
95427 hlist_for_each_entry_rcu(dev, head, index_hlist) {
95428 if (idx < s_idx)
95429 @@ -2364,7 +2364,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
95430 p.iph.ihl = 5;
95431 p.iph.protocol = IPPROTO_IPV6;
95432 p.iph.ttl = 64;
95433 - ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
95434 + ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
95435
95436 if (ops->ndo_do_ioctl) {
95437 mm_segment_t oldfs = get_fs();
95438 @@ -3977,7 +3977,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
95439 s_ip_idx = ip_idx = cb->args[2];
95440
95441 rcu_read_lock();
95442 - cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
95443 + cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
95444 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
95445 idx = 0;
95446 head = &net->dev_index_head[h];
95447 @@ -4603,7 +4603,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
95448 dst_free(&ifp->rt->dst);
95449 break;
95450 }
95451 - atomic_inc(&net->ipv6.dev_addr_genid);
95452 + atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
95453 rt_genid_bump_ipv6(net);
95454 }
95455
95456 @@ -4624,7 +4624,7 @@ int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
95457 int *valp = ctl->data;
95458 int val = *valp;
95459 loff_t pos = *ppos;
95460 - struct ctl_table lctl;
95461 + ctl_table_no_const lctl;
95462 int ret;
95463
95464 /*
95465 @@ -4709,7 +4709,7 @@ int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
95466 int *valp = ctl->data;
95467 int val = *valp;
95468 loff_t pos = *ppos;
95469 - struct ctl_table lctl;
95470 + ctl_table_no_const lctl;
95471 int ret;
95472
95473 /*
95474 diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
95475 index 8132b44..b8eca70 100644
95476 --- a/net/ipv6/af_inet6.c
95477 +++ b/net/ipv6/af_inet6.c
95478 @@ -767,7 +767,7 @@ static int __net_init inet6_net_init(struct net *net)
95479
95480 net->ipv6.sysctl.bindv6only = 0;
95481 net->ipv6.sysctl.icmpv6_time = 1*HZ;
95482 - atomic_set(&net->ipv6.rt_genid, 0);
95483 + atomic_set_unchecked(&net->ipv6.rt_genid, 0);
95484
95485 err = ipv6_init_mibs(net);
95486 if (err)
95487 diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
95488 index c66c6df..f375d3c 100644
95489 --- a/net/ipv6/datagram.c
95490 +++ b/net/ipv6/datagram.c
95491 @@ -908,5 +908,5 @@ void ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
95492 0,
95493 sock_i_ino(sp),
95494 atomic_read(&sp->sk_refcnt), sp,
95495 - atomic_read(&sp->sk_drops));
95496 + atomic_read_unchecked(&sp->sk_drops));
95497 }
95498 diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
95499 index eef8d94..cfa1852 100644
95500 --- a/net/ipv6/icmp.c
95501 +++ b/net/ipv6/icmp.c
95502 @@ -997,7 +997,7 @@ struct ctl_table ipv6_icmp_table_template[] = {
95503
95504 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
95505 {
95506 - struct ctl_table *table;
95507 + ctl_table_no_const *table;
95508
95509 table = kmemdup(ipv6_icmp_table_template,
95510 sizeof(ipv6_icmp_table_template),
95511 diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
95512 index bf4a9a0..e5f6ac5 100644
95513 --- a/net/ipv6/ip6_gre.c
95514 +++ b/net/ipv6/ip6_gre.c
95515 @@ -74,7 +74,7 @@ struct ip6gre_net {
95516 struct net_device *fb_tunnel_dev;
95517 };
95518
95519 -static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
95520 +static struct rtnl_link_ops ip6gre_link_ops;
95521 static int ip6gre_tunnel_init(struct net_device *dev);
95522 static void ip6gre_tunnel_setup(struct net_device *dev);
95523 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
95524 @@ -1286,7 +1286,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
95525 }
95526
95527
95528 -static struct inet6_protocol ip6gre_protocol __read_mostly = {
95529 +static struct inet6_protocol ip6gre_protocol = {
95530 .handler = ip6gre_rcv,
95531 .err_handler = ip6gre_err,
95532 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
95533 @@ -1622,7 +1622,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
95534 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
95535 };
95536
95537 -static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
95538 +static struct rtnl_link_ops ip6gre_link_ops = {
95539 .kind = "ip6gre",
95540 .maxtype = IFLA_GRE_MAX,
95541 .policy = ip6gre_policy,
95542 @@ -1635,7 +1635,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
95543 .fill_info = ip6gre_fill_info,
95544 };
95545
95546 -static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
95547 +static struct rtnl_link_ops ip6gre_tap_ops = {
95548 .kind = "ip6gretap",
95549 .maxtype = IFLA_GRE_MAX,
95550 .policy = ip6gre_policy,
95551 diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
95552 index c1e11b5..568e633 100644
95553 --- a/net/ipv6/ip6_tunnel.c
95554 +++ b/net/ipv6/ip6_tunnel.c
95555 @@ -89,7 +89,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
95556
95557 static int ip6_tnl_dev_init(struct net_device *dev);
95558 static void ip6_tnl_dev_setup(struct net_device *dev);
95559 -static struct rtnl_link_ops ip6_link_ops __read_mostly;
95560 +static struct rtnl_link_ops ip6_link_ops;
95561
95562 static int ip6_tnl_net_id __read_mostly;
95563 struct ip6_tnl_net {
95564 @@ -1699,7 +1699,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
95565 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
95566 };
95567
95568 -static struct rtnl_link_ops ip6_link_ops __read_mostly = {
95569 +static struct rtnl_link_ops ip6_link_ops = {
95570 .kind = "ip6tnl",
95571 .maxtype = IFLA_IPTUN_MAX,
95572 .policy = ip6_tnl_policy,
95573 diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
95574 index d1e2e8e..51c19ae 100644
95575 --- a/net/ipv6/ipv6_sockglue.c
95576 +++ b/net/ipv6/ipv6_sockglue.c
95577 @@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
95578 if (sk->sk_type != SOCK_STREAM)
95579 return -ENOPROTOOPT;
95580
95581 - msg.msg_control = optval;
95582 + msg.msg_control = (void __force_kernel *)optval;
95583 msg.msg_controllen = len;
95584 msg.msg_flags = flags;
95585
95586 diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
95587 index 44400c2..8e11f52 100644
95588 --- a/net/ipv6/netfilter/ip6_tables.c
95589 +++ b/net/ipv6/netfilter/ip6_tables.c
95590 @@ -1078,14 +1078,14 @@ static int compat_table_info(const struct xt_table_info *info,
95591 #endif
95592
95593 static int get_info(struct net *net, void __user *user,
95594 - const int *len, int compat)
95595 + int len, int compat)
95596 {
95597 char name[XT_TABLE_MAXNAMELEN];
95598 struct xt_table *t;
95599 int ret;
95600
95601 - if (*len != sizeof(struct ip6t_getinfo)) {
95602 - duprintf("length %u != %zu\n", *len,
95603 + if (len != sizeof(struct ip6t_getinfo)) {
95604 + duprintf("length %u != %zu\n", len,
95605 sizeof(struct ip6t_getinfo));
95606 return -EINVAL;
95607 }
95608 @@ -1122,7 +1122,7 @@ static int get_info(struct net *net, void __user *user,
95609 info.size = private->size;
95610 strcpy(info.name, name);
95611
95612 - if (copy_to_user(user, &info, *len) != 0)
95613 + if (copy_to_user(user, &info, len) != 0)
95614 ret = -EFAULT;
95615 else
95616 ret = 0;
95617 @@ -1976,7 +1976,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
95618
95619 switch (cmd) {
95620 case IP6T_SO_GET_INFO:
95621 - ret = get_info(sock_net(sk), user, len, 1);
95622 + ret = get_info(sock_net(sk), user, *len, 1);
95623 break;
95624 case IP6T_SO_GET_ENTRIES:
95625 ret = compat_get_entries(sock_net(sk), user, len);
95626 @@ -2023,7 +2023,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
95627
95628 switch (cmd) {
95629 case IP6T_SO_GET_INFO:
95630 - ret = get_info(sock_net(sk), user, len, 0);
95631 + ret = get_info(sock_net(sk), user, *len, 0);
95632 break;
95633
95634 case IP6T_SO_GET_ENTRIES:
95635 diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
95636 index 253566a..9fa50c7 100644
95637 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c
95638 +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
95639 @@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
95640
95641 static int nf_ct_frag6_sysctl_register(struct net *net)
95642 {
95643 - struct ctl_table *table;
95644 + ctl_table_no_const *table = NULL;
95645 struct ctl_table_header *hdr;
95646
95647 - table = nf_ct_frag6_sysctl_table;
95648 if (!net_eq(net, &init_net)) {
95649 - table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
95650 + table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
95651 GFP_KERNEL);
95652 if (table == NULL)
95653 goto err_alloc;
95654 @@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
95655 table[0].data = &net->nf_frag.frags.timeout;
95656 table[1].data = &net->nf_frag.frags.low_thresh;
95657 table[2].data = &net->nf_frag.frags.high_thresh;
95658 - }
95659 -
95660 - hdr = register_net_sysctl(net, "net/netfilter", table);
95661 + hdr = register_net_sysctl(net, "net/netfilter", table);
95662 + } else
95663 + hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
95664 if (hdr == NULL)
95665 goto err_reg;
95666
95667 @@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
95668 return 0;
95669
95670 err_reg:
95671 - if (!net_eq(net, &init_net))
95672 - kfree(table);
95673 + kfree(table);
95674 err_alloc:
95675 return -ENOMEM;
95676 }
95677 diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
95678 index 827f795..7e28e82 100644
95679 --- a/net/ipv6/output_core.c
95680 +++ b/net/ipv6/output_core.c
95681 @@ -9,8 +9,8 @@
95682
95683 void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
95684 {
95685 - static atomic_t ipv6_fragmentation_id;
95686 - int old, new;
95687 + static atomic_unchecked_t ipv6_fragmentation_id;
95688 + int id;
95689
95690 #if IS_ENABLED(CONFIG_IPV6)
95691 if (rt && !(rt->dst.flags & DST_NOPEER)) {
95692 @@ -26,13 +26,10 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
95693 }
95694 }
95695 #endif
95696 - do {
95697 - old = atomic_read(&ipv6_fragmentation_id);
95698 - new = old + 1;
95699 - if (!new)
95700 - new = 1;
95701 - } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
95702 - fhdr->identification = htonl(new);
95703 + id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
95704 + if (!id)
95705 + id = atomic_inc_return_unchecked(&ipv6_fragmentation_id);
95706 + fhdr->identification = htonl(id);
95707 }
95708 EXPORT_SYMBOL(ipv6_select_ident);
95709
95710 diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
95711 index 7856e96..75ebc7f 100644
95712 --- a/net/ipv6/ping.c
95713 +++ b/net/ipv6/ping.c
95714 @@ -246,6 +246,22 @@ static struct pernet_operations ping_v6_net_ops = {
95715 };
95716 #endif
95717
95718 +static struct pingv6_ops real_pingv6_ops = {
95719 + .ipv6_recv_error = ipv6_recv_error,
95720 + .ip6_datagram_recv_ctl = ip6_datagram_recv_ctl,
95721 + .icmpv6_err_convert = icmpv6_err_convert,
95722 + .ipv6_icmp_error = ipv6_icmp_error,
95723 + .ipv6_chk_addr = ipv6_chk_addr,
95724 +};
95725 +
95726 +static struct pingv6_ops dummy_pingv6_ops = {
95727 + .ipv6_recv_error = dummy_ipv6_recv_error,
95728 + .ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl,
95729 + .icmpv6_err_convert = dummy_icmpv6_err_convert,
95730 + .ipv6_icmp_error = dummy_ipv6_icmp_error,
95731 + .ipv6_chk_addr = dummy_ipv6_chk_addr,
95732 +};
95733 +
95734 int __init pingv6_init(void)
95735 {
95736 #ifdef CONFIG_PROC_FS
95737 @@ -253,11 +269,7 @@ int __init pingv6_init(void)
95738 if (ret)
95739 return ret;
95740 #endif
95741 - pingv6_ops.ipv6_recv_error = ipv6_recv_error;
95742 - pingv6_ops.ip6_datagram_recv_ctl = ip6_datagram_recv_ctl;
95743 - pingv6_ops.icmpv6_err_convert = icmpv6_err_convert;
95744 - pingv6_ops.ipv6_icmp_error = ipv6_icmp_error;
95745 - pingv6_ops.ipv6_chk_addr = ipv6_chk_addr;
95746 + pingv6_ops = &real_pingv6_ops;
95747 return inet6_register_protosw(&pingv6_protosw);
95748 }
95749
95750 @@ -266,11 +278,7 @@ int __init pingv6_init(void)
95751 */
95752 void pingv6_exit(void)
95753 {
95754 - pingv6_ops.ipv6_recv_error = dummy_ipv6_recv_error;
95755 - pingv6_ops.ip6_datagram_recv_ctl = dummy_ip6_datagram_recv_ctl;
95756 - pingv6_ops.icmpv6_err_convert = dummy_icmpv6_err_convert;
95757 - pingv6_ops.ipv6_icmp_error = dummy_ipv6_icmp_error;
95758 - pingv6_ops.ipv6_chk_addr = dummy_ipv6_chk_addr;
95759 + pingv6_ops = &dummy_pingv6_ops;
95760 #ifdef CONFIG_PROC_FS
95761 unregister_pernet_subsys(&ping_v6_net_ops);
95762 #endif
95763 diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
95764 index 430067c..4adf088 100644
95765 --- a/net/ipv6/raw.c
95766 +++ b/net/ipv6/raw.c
95767 @@ -385,7 +385,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
95768 {
95769 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
95770 skb_checksum_complete(skb)) {
95771 - atomic_inc(&sk->sk_drops);
95772 + atomic_inc_unchecked(&sk->sk_drops);
95773 kfree_skb(skb);
95774 return NET_RX_DROP;
95775 }
95776 @@ -413,7 +413,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
95777 struct raw6_sock *rp = raw6_sk(sk);
95778
95779 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
95780 - atomic_inc(&sk->sk_drops);
95781 + atomic_inc_unchecked(&sk->sk_drops);
95782 kfree_skb(skb);
95783 return NET_RX_DROP;
95784 }
95785 @@ -437,7 +437,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
95786
95787 if (inet->hdrincl) {
95788 if (skb_checksum_complete(skb)) {
95789 - atomic_inc(&sk->sk_drops);
95790 + atomic_inc_unchecked(&sk->sk_drops);
95791 kfree_skb(skb);
95792 return NET_RX_DROP;
95793 }
95794 @@ -607,7 +607,7 @@ out:
95795 return err;
95796 }
95797
95798 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
95799 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
95800 struct flowi6 *fl6, struct dst_entry **dstp,
95801 unsigned int flags)
95802 {
95803 @@ -920,12 +920,15 @@ do_confirm:
95804 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
95805 char __user *optval, int optlen)
95806 {
95807 + struct icmp6_filter filter;
95808 +
95809 switch (optname) {
95810 case ICMPV6_FILTER:
95811 if (optlen > sizeof(struct icmp6_filter))
95812 optlen = sizeof(struct icmp6_filter);
95813 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
95814 + if (copy_from_user(&filter, optval, optlen))
95815 return -EFAULT;
95816 + raw6_sk(sk)->filter = filter;
95817 return 0;
95818 default:
95819 return -ENOPROTOOPT;
95820 @@ -938,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
95821 char __user *optval, int __user *optlen)
95822 {
95823 int len;
95824 + struct icmp6_filter filter;
95825
95826 switch (optname) {
95827 case ICMPV6_FILTER:
95828 @@ -949,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
95829 len = sizeof(struct icmp6_filter);
95830 if (put_user(len, optlen))
95831 return -EFAULT;
95832 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
95833 + filter = raw6_sk(sk)->filter;
95834 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
95835 return -EFAULT;
95836 return 0;
95837 default:
95838 diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
95839 index 1aeb473..bea761c 100644
95840 --- a/net/ipv6/reassembly.c
95841 +++ b/net/ipv6/reassembly.c
95842 @@ -626,12 +626,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
95843
95844 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
95845 {
95846 - struct ctl_table *table;
95847 + ctl_table_no_const *table = NULL;
95848 struct ctl_table_header *hdr;
95849
95850 - table = ip6_frags_ns_ctl_table;
95851 if (!net_eq(net, &init_net)) {
95852 - table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
95853 + table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
95854 if (table == NULL)
95855 goto err_alloc;
95856
95857 @@ -642,9 +641,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
95858 /* Don't export sysctls to unprivileged users */
95859 if (net->user_ns != &init_user_ns)
95860 table[0].procname = NULL;
95861 - }
95862 + hdr = register_net_sysctl(net, "net/ipv6", table);
95863 + } else
95864 + hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
95865
95866 - hdr = register_net_sysctl(net, "net/ipv6", table);
95867 if (hdr == NULL)
95868 goto err_reg;
95869
95870 @@ -652,8 +652,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
95871 return 0;
95872
95873 err_reg:
95874 - if (!net_eq(net, &init_net))
95875 - kfree(table);
95876 + kfree(table);
95877 err_alloc:
95878 return -ENOMEM;
95879 }
95880 diff --git a/net/ipv6/route.c b/net/ipv6/route.c
95881 index 77308af..36ed509 100644
95882 --- a/net/ipv6/route.c
95883 +++ b/net/ipv6/route.c
95884 @@ -3009,7 +3009,7 @@ struct ctl_table ipv6_route_table_template[] = {
95885
95886 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
95887 {
95888 - struct ctl_table *table;
95889 + ctl_table_no_const *table;
95890
95891 table = kmemdup(ipv6_route_table_template,
95892 sizeof(ipv6_route_table_template),
95893 diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
95894 index b433884..3875266 100644
95895 --- a/net/ipv6/sit.c
95896 +++ b/net/ipv6/sit.c
95897 @@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
95898 static void ipip6_dev_free(struct net_device *dev);
95899 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
95900 __be32 *v4dst);
95901 -static struct rtnl_link_ops sit_link_ops __read_mostly;
95902 +static struct rtnl_link_ops sit_link_ops;
95903
95904 static int sit_net_id __read_mostly;
95905 struct sit_net {
95906 @@ -1603,7 +1603,7 @@ static void ipip6_dellink(struct net_device *dev, struct list_head *head)
95907 unregister_netdevice_queue(dev, head);
95908 }
95909
95910 -static struct rtnl_link_ops sit_link_ops __read_mostly = {
95911 +static struct rtnl_link_ops sit_link_ops = {
95912 .kind = "sit",
95913 .maxtype = IFLA_IPTUN_MAX,
95914 .policy = ipip6_policy,
95915 diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
95916 index 107b2f1..72741a9 100644
95917 --- a/net/ipv6/sysctl_net_ipv6.c
95918 +++ b/net/ipv6/sysctl_net_ipv6.c
95919 @@ -40,7 +40,7 @@ static struct ctl_table ipv6_rotable[] = {
95920
95921 static int __net_init ipv6_sysctl_net_init(struct net *net)
95922 {
95923 - struct ctl_table *ipv6_table;
95924 + ctl_table_no_const *ipv6_table;
95925 struct ctl_table *ipv6_route_table;
95926 struct ctl_table *ipv6_icmp_table;
95927 int err;
95928 diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
95929 index 5c71501..7e8d5d3 100644
95930 --- a/net/ipv6/tcp_ipv6.c
95931 +++ b/net/ipv6/tcp_ipv6.c
95932 @@ -104,6 +104,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
95933 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
95934 }
95935
95936 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95937 +extern int grsec_enable_blackhole;
95938 +#endif
95939 +
95940 static void tcp_v6_hash(struct sock *sk)
95941 {
95942 if (sk->sk_state != TCP_CLOSE) {
95943 @@ -1397,6 +1401,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
95944 return 0;
95945
95946 reset:
95947 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95948 + if (!grsec_enable_blackhole)
95949 +#endif
95950 tcp_v6_send_reset(sk, skb);
95951 discard:
95952 if (opt_skb)
95953 @@ -1479,12 +1486,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
95954 TCP_SKB_CB(skb)->sacked = 0;
95955
95956 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
95957 - if (!sk)
95958 + if (!sk) {
95959 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95960 + ret = 1;
95961 +#endif
95962 goto no_tcp_socket;
95963 + }
95964
95965 process:
95966 - if (sk->sk_state == TCP_TIME_WAIT)
95967 + if (sk->sk_state == TCP_TIME_WAIT) {
95968 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95969 + ret = 2;
95970 +#endif
95971 goto do_time_wait;
95972 + }
95973
95974 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
95975 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
95976 @@ -1536,6 +1551,10 @@ csum_error:
95977 bad_packet:
95978 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
95979 } else {
95980 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95981 + if (!grsec_enable_blackhole || (ret == 1 &&
95982 + (skb->dev->flags & IFF_LOOPBACK)))
95983 +#endif
95984 tcp_v6_send_reset(NULL, skb);
95985 }
95986
95987 diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
95988 index 3d2758d..626c422 100644
95989 --- a/net/ipv6/udp.c
95990 +++ b/net/ipv6/udp.c
95991 @@ -53,6 +53,10 @@
95992 #include <trace/events/skb.h>
95993 #include "udp_impl.h"
95994
95995 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
95996 +extern int grsec_enable_blackhole;
95997 +#endif
95998 +
95999 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
96000 {
96001 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
96002 @@ -417,7 +421,7 @@ try_again:
96003 if (unlikely(err)) {
96004 trace_kfree_skb(skb, udpv6_recvmsg);
96005 if (!peeked) {
96006 - atomic_inc(&sk->sk_drops);
96007 + atomic_inc_unchecked(&sk->sk_drops);
96008 if (is_udp4)
96009 UDP_INC_STATS_USER(sock_net(sk),
96010 UDP_MIB_INERRORS,
96011 @@ -665,7 +669,7 @@ csum_error:
96012 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
96013 drop:
96014 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
96015 - atomic_inc(&sk->sk_drops);
96016 + atomic_inc_unchecked(&sk->sk_drops);
96017 kfree_skb(skb);
96018 return -1;
96019 }
96020 @@ -723,7 +727,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
96021 if (likely(skb1 == NULL))
96022 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
96023 if (!skb1) {
96024 - atomic_inc(&sk->sk_drops);
96025 + atomic_inc_unchecked(&sk->sk_drops);
96026 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
96027 IS_UDPLITE(sk));
96028 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
96029 @@ -863,6 +867,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
96030 goto csum_error;
96031
96032 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
96033 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
96034 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
96035 +#endif
96036 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
96037
96038 kfree_skb(skb);
96039 diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
96040 index 550b195..6acea83 100644
96041 --- a/net/ipv6/xfrm6_policy.c
96042 +++ b/net/ipv6/xfrm6_policy.c
96043 @@ -212,11 +212,11 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
96044 }
96045 }
96046
96047 -static inline int xfrm6_garbage_collect(struct dst_ops *ops)
96048 +static int xfrm6_garbage_collect(struct dst_ops *ops)
96049 {
96050 struct net *net = container_of(ops, struct net, xfrm.xfrm6_dst_ops);
96051
96052 - xfrm6_policy_afinfo.garbage_collect(net);
96053 + xfrm_garbage_collect_deferred(net);
96054 return dst_entries_get_fast(ops) > ops->gc_thresh * 2;
96055 }
96056
96057 @@ -329,19 +329,19 @@ static struct ctl_table xfrm6_policy_table[] = {
96058
96059 static int __net_init xfrm6_net_init(struct net *net)
96060 {
96061 - struct ctl_table *table;
96062 + ctl_table_no_const *table = NULL;
96063 struct ctl_table_header *hdr;
96064
96065 - table = xfrm6_policy_table;
96066 if (!net_eq(net, &init_net)) {
96067 - table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
96068 + table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
96069 if (!table)
96070 goto err_alloc;
96071
96072 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
96073 - }
96074 + hdr = register_net_sysctl(net, "net/ipv6", table);
96075 + } else
96076 + hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
96077
96078 - hdr = register_net_sysctl(net, "net/ipv6", table);
96079 if (!hdr)
96080 goto err_reg;
96081
96082 @@ -349,8 +349,7 @@ static int __net_init xfrm6_net_init(struct net *net)
96083 return 0;
96084
96085 err_reg:
96086 - if (!net_eq(net, &init_net))
96087 - kfree(table);
96088 + kfree(table);
96089 err_alloc:
96090 return -ENOMEM;
96091 }
96092 diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
96093 index 41ac7938..75e3bb1 100644
96094 --- a/net/irda/ircomm/ircomm_tty.c
96095 +++ b/net/irda/ircomm/ircomm_tty.c
96096 @@ -319,11 +319,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
96097 add_wait_queue(&port->open_wait, &wait);
96098
96099 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
96100 - __FILE__, __LINE__, tty->driver->name, port->count);
96101 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
96102
96103 spin_lock_irqsave(&port->lock, flags);
96104 if (!tty_hung_up_p(filp))
96105 - port->count--;
96106 + atomic_dec(&port->count);
96107 port->blocked_open++;
96108 spin_unlock_irqrestore(&port->lock, flags);
96109
96110 @@ -358,7 +358,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
96111 }
96112
96113 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
96114 - __FILE__, __LINE__, tty->driver->name, port->count);
96115 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
96116
96117 schedule();
96118 }
96119 @@ -368,12 +368,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
96120
96121 spin_lock_irqsave(&port->lock, flags);
96122 if (!tty_hung_up_p(filp))
96123 - port->count++;
96124 + atomic_inc(&port->count);
96125 port->blocked_open--;
96126 spin_unlock_irqrestore(&port->lock, flags);
96127
96128 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
96129 - __FILE__, __LINE__, tty->driver->name, port->count);
96130 + __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
96131
96132 if (!retval)
96133 port->flags |= ASYNC_NORMAL_ACTIVE;
96134 @@ -447,12 +447,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
96135
96136 /* ++ is not atomic, so this should be protected - Jean II */
96137 spin_lock_irqsave(&self->port.lock, flags);
96138 - self->port.count++;
96139 + atomic_inc(&self->port.count);
96140 spin_unlock_irqrestore(&self->port.lock, flags);
96141 tty_port_tty_set(&self->port, tty);
96142
96143 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
96144 - self->line, self->port.count);
96145 + self->line, atomic_read(&self->port.count));
96146
96147 /* Not really used by us, but lets do it anyway */
96148 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
96149 @@ -989,7 +989,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
96150 tty_kref_put(port->tty);
96151 }
96152 port->tty = NULL;
96153 - port->count = 0;
96154 + atomic_set(&port->count, 0);
96155 spin_unlock_irqrestore(&port->lock, flags);
96156
96157 wake_up_interruptible(&port->open_wait);
96158 @@ -1346,7 +1346,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
96159 seq_putc(m, '\n');
96160
96161 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
96162 - seq_printf(m, "Open count: %d\n", self->port.count);
96163 + seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
96164 seq_printf(m, "Max data size: %d\n", self->max_data_size);
96165 seq_printf(m, "Max header size: %d\n", self->max_header_size);
96166
96167 diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
96168 index c4b7218..3e83259 100644
96169 --- a/net/iucv/af_iucv.c
96170 +++ b/net/iucv/af_iucv.c
96171 @@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
96172
96173 write_lock_bh(&iucv_sk_list.lock);
96174
96175 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
96176 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
96177 while (__iucv_get_sock_by_name(name)) {
96178 sprintf(name, "%08x",
96179 - atomic_inc_return(&iucv_sk_list.autobind_name));
96180 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
96181 }
96182
96183 write_unlock_bh(&iucv_sk_list.lock);
96184 diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
96185 index cd5b8ec..f205e6b 100644
96186 --- a/net/iucv/iucv.c
96187 +++ b/net/iucv/iucv.c
96188 @@ -690,7 +690,7 @@ static int iucv_cpu_notify(struct notifier_block *self,
96189 return NOTIFY_OK;
96190 }
96191
96192 -static struct notifier_block __refdata iucv_cpu_notifier = {
96193 +static struct notifier_block iucv_cpu_notifier = {
96194 .notifier_call = iucv_cpu_notify,
96195 };
96196
96197 diff --git a/net/key/af_key.c b/net/key/af_key.c
96198 index 545f047..9757a9d 100644
96199 --- a/net/key/af_key.c
96200 +++ b/net/key/af_key.c
96201 @@ -3041,10 +3041,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
96202 static u32 get_acqseq(void)
96203 {
96204 u32 res;
96205 - static atomic_t acqseq;
96206 + static atomic_unchecked_t acqseq;
96207
96208 do {
96209 - res = atomic_inc_return(&acqseq);
96210 + res = atomic_inc_return_unchecked(&acqseq);
96211 } while (!res);
96212 return res;
96213 }
96214 diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
96215 index 9903ee5..18978be 100644
96216 --- a/net/mac80211/cfg.c
96217 +++ b/net/mac80211/cfg.c
96218 @@ -826,7 +826,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
96219 ret = ieee80211_vif_use_channel(sdata, chandef,
96220 IEEE80211_CHANCTX_EXCLUSIVE);
96221 }
96222 - } else if (local->open_count == local->monitors) {
96223 + } else if (local_read(&local->open_count) == local->monitors) {
96224 local->_oper_chandef = *chandef;
96225 ieee80211_hw_config(local, 0);
96226 }
96227 @@ -3124,7 +3124,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
96228 else
96229 local->probe_req_reg--;
96230
96231 - if (!local->open_count)
96232 + if (!local_read(&local->open_count))
96233 break;
96234
96235 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
96236 @@ -3587,8 +3587,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
96237 if (chanctx_conf) {
96238 *chandef = chanctx_conf->def;
96239 ret = 0;
96240 - } else if (local->open_count > 0 &&
96241 - local->open_count == local->monitors &&
96242 + } else if (local_read(&local->open_count) > 0 &&
96243 + local_read(&local->open_count) == local->monitors &&
96244 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
96245 if (local->use_chanctx)
96246 *chandef = local->monitor_chandef;
96247 diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
96248 index 611abfc..8c2c7e4 100644
96249 --- a/net/mac80211/ieee80211_i.h
96250 +++ b/net/mac80211/ieee80211_i.h
96251 @@ -28,6 +28,7 @@
96252 #include <net/ieee80211_radiotap.h>
96253 #include <net/cfg80211.h>
96254 #include <net/mac80211.h>
96255 +#include <asm/local.h>
96256 #include "key.h"
96257 #include "sta_info.h"
96258 #include "debug.h"
96259 @@ -945,7 +946,7 @@ struct ieee80211_local {
96260 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
96261 spinlock_t queue_stop_reason_lock;
96262
96263 - int open_count;
96264 + local_t open_count;
96265 int monitors, cooked_mntrs;
96266 /* number of interfaces with corresponding FIF_ flags */
96267 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
96268 diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
96269 index fcecd63..a404454 100644
96270 --- a/net/mac80211/iface.c
96271 +++ b/net/mac80211/iface.c
96272 @@ -519,7 +519,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
96273 break;
96274 }
96275
96276 - if (local->open_count == 0) {
96277 + if (local_read(&local->open_count) == 0) {
96278 res = drv_start(local);
96279 if (res)
96280 goto err_del_bss;
96281 @@ -566,7 +566,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
96282 res = drv_add_interface(local, sdata);
96283 if (res)
96284 goto err_stop;
96285 - } else if (local->monitors == 0 && local->open_count == 0) {
96286 + } else if (local->monitors == 0 && local_read(&local->open_count) == 0) {
96287 res = ieee80211_add_virtual_monitor(local);
96288 if (res)
96289 goto err_stop;
96290 @@ -675,7 +675,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
96291 atomic_inc(&local->iff_promiscs);
96292
96293 if (coming_up)
96294 - local->open_count++;
96295 + local_inc(&local->open_count);
96296
96297 if (hw_reconf_flags)
96298 ieee80211_hw_config(local, hw_reconf_flags);
96299 @@ -713,7 +713,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
96300 err_del_interface:
96301 drv_remove_interface(local, sdata);
96302 err_stop:
96303 - if (!local->open_count)
96304 + if (!local_read(&local->open_count))
96305 drv_stop(local);
96306 err_del_bss:
96307 sdata->bss = NULL;
96308 @@ -852,7 +852,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
96309 }
96310
96311 if (going_down)
96312 - local->open_count--;
96313 + local_dec(&local->open_count);
96314
96315 switch (sdata->vif.type) {
96316 case NL80211_IFTYPE_AP_VLAN:
96317 @@ -919,7 +919,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
96318 }
96319 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
96320
96321 - if (local->open_count == 0)
96322 + if (local_read(&local->open_count) == 0)
96323 ieee80211_clear_tx_pending(local);
96324
96325 /*
96326 @@ -959,7 +959,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
96327
96328 ieee80211_recalc_ps(local, -1);
96329
96330 - if (local->open_count == 0) {
96331 + if (local_read(&local->open_count) == 0) {
96332 ieee80211_stop_device(local);
96333
96334 /* no reconfiguring after stop! */
96335 @@ -970,7 +970,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
96336 ieee80211_configure_filter(local);
96337 ieee80211_hw_config(local, hw_reconf_flags);
96338
96339 - if (local->monitors == local->open_count)
96340 + if (local->monitors == local_read(&local->open_count))
96341 ieee80211_add_virtual_monitor(local);
96342 }
96343
96344 diff --git a/net/mac80211/main.c b/net/mac80211/main.c
96345 index e765f77..dfd72e7 100644
96346 --- a/net/mac80211/main.c
96347 +++ b/net/mac80211/main.c
96348 @@ -172,7 +172,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
96349 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
96350 IEEE80211_CONF_CHANGE_POWER);
96351
96352 - if (changed && local->open_count) {
96353 + if (changed && local_read(&local->open_count)) {
96354 ret = drv_config(local, changed);
96355 /*
96356 * Goal:
96357 diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
96358 index 3401262..d5cd68d 100644
96359 --- a/net/mac80211/pm.c
96360 +++ b/net/mac80211/pm.c
96361 @@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
96362 struct ieee80211_sub_if_data *sdata;
96363 struct sta_info *sta;
96364
96365 - if (!local->open_count)
96366 + if (!local_read(&local->open_count))
96367 goto suspend;
96368
96369 ieee80211_scan_cancel(local);
96370 @@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
96371 cancel_work_sync(&local->dynamic_ps_enable_work);
96372 del_timer_sync(&local->dynamic_ps_timer);
96373
96374 - local->wowlan = wowlan && local->open_count;
96375 + local->wowlan = wowlan && local_read(&local->open_count);
96376 if (local->wowlan) {
96377 int err = drv_suspend(local, wowlan);
96378 if (err < 0) {
96379 @@ -116,7 +116,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
96380 WARN_ON(!list_empty(&local->chanctx_list));
96381
96382 /* stop hardware - this must stop RX */
96383 - if (local->open_count)
96384 + if (local_read(&local->open_count))
96385 ieee80211_stop_device(local);
96386
96387 suspend:
96388 diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
96389 index e126605..73d2c39 100644
96390 --- a/net/mac80211/rate.c
96391 +++ b/net/mac80211/rate.c
96392 @@ -725,7 +725,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
96393
96394 ASSERT_RTNL();
96395
96396 - if (local->open_count)
96397 + if (local_read(&local->open_count))
96398 return -EBUSY;
96399
96400 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
96401 diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
96402 index c97a065..ff61928 100644
96403 --- a/net/mac80211/rc80211_pid_debugfs.c
96404 +++ b/net/mac80211/rc80211_pid_debugfs.c
96405 @@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
96406
96407 spin_unlock_irqrestore(&events->lock, status);
96408
96409 - if (copy_to_user(buf, pb, p))
96410 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
96411 return -EFAULT;
96412
96413 return p;
96414 diff --git a/net/mac80211/util.c b/net/mac80211/util.c
96415 index 69e4ef5..e8e4b92 100644
96416 --- a/net/mac80211/util.c
96417 +++ b/net/mac80211/util.c
96418 @@ -1470,7 +1470,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
96419 }
96420 #endif
96421 /* everything else happens only if HW was up & running */
96422 - if (!local->open_count)
96423 + if (!local_read(&local->open_count))
96424 goto wake_up;
96425
96426 /*
96427 @@ -1695,7 +1695,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
96428 local->in_reconfig = false;
96429 barrier();
96430
96431 - if (local->monitors == local->open_count && local->monitors > 0)
96432 + if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
96433 ieee80211_add_virtual_monitor(local);
96434
96435 /*
96436 diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
96437 index 6e839b6..002a233 100644
96438 --- a/net/netfilter/Kconfig
96439 +++ b/net/netfilter/Kconfig
96440 @@ -950,6 +950,16 @@ config NETFILTER_XT_MATCH_ESP
96441
96442 To compile it as a module, choose M here. If unsure, say N.
96443
96444 +config NETFILTER_XT_MATCH_GRADM
96445 + tristate '"gradm" match support'
96446 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
96447 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
96448 + ---help---
96449 + The gradm match allows to match on grsecurity RBAC being enabled.
96450 + It is useful when iptables rules are applied early on bootup to
96451 + prevent connections to the machine (except from a trusted host)
96452 + while the RBAC system is disabled.
96453 +
96454 config NETFILTER_XT_MATCH_HASHLIMIT
96455 tristate '"hashlimit" match support'
96456 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
96457 diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
96458 index c3a0a12..90b587f 100644
96459 --- a/net/netfilter/Makefile
96460 +++ b/net/netfilter/Makefile
96461 @@ -112,6 +112,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
96462 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
96463 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
96464 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
96465 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
96466 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
96467 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
96468 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
96469 diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
96470 index f2e30fb..f131862 100644
96471 --- a/net/netfilter/ipset/ip_set_core.c
96472 +++ b/net/netfilter/ipset/ip_set_core.c
96473 @@ -1819,7 +1819,7 @@ done:
96474 return ret;
96475 }
96476
96477 -static struct nf_sockopt_ops so_set __read_mostly = {
96478 +static struct nf_sockopt_ops so_set = {
96479 .pf = PF_INET,
96480 .get_optmin = SO_IP_SET,
96481 .get_optmax = SO_IP_SET + 1,
96482 diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
96483 index 4c8e5c0..5a79b4d 100644
96484 --- a/net/netfilter/ipvs/ip_vs_conn.c
96485 +++ b/net/netfilter/ipvs/ip_vs_conn.c
96486 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
96487 /* Increase the refcnt counter of the dest */
96488 ip_vs_dest_hold(dest);
96489
96490 - conn_flags = atomic_read(&dest->conn_flags);
96491 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
96492 if (cp->protocol != IPPROTO_UDP)
96493 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
96494 flags = cp->flags;
96495 @@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
96496
96497 cp->control = NULL;
96498 atomic_set(&cp->n_control, 0);
96499 - atomic_set(&cp->in_pkts, 0);
96500 + atomic_set_unchecked(&cp->in_pkts, 0);
96501
96502 cp->packet_xmit = NULL;
96503 cp->app = NULL;
96504 @@ -1188,7 +1188,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
96505
96506 /* Don't drop the entry if its number of incoming packets is not
96507 located in [0, 8] */
96508 - i = atomic_read(&cp->in_pkts);
96509 + i = atomic_read_unchecked(&cp->in_pkts);
96510 if (i > 8 || i < 0) return 0;
96511
96512 if (!todrop_rate[i]) return 0;
96513 diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
96514 index 3581736..c0453e9 100644
96515 --- a/net/netfilter/ipvs/ip_vs_core.c
96516 +++ b/net/netfilter/ipvs/ip_vs_core.c
96517 @@ -567,7 +567,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
96518 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
96519 /* do not touch skb anymore */
96520
96521 - atomic_inc(&cp->in_pkts);
96522 + atomic_inc_unchecked(&cp->in_pkts);
96523 ip_vs_conn_put(cp);
96524 return ret;
96525 }
96526 @@ -1706,7 +1706,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
96527 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
96528 pkts = sysctl_sync_threshold(ipvs);
96529 else
96530 - pkts = atomic_add_return(1, &cp->in_pkts);
96531 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
96532
96533 if (ipvs->sync_state & IP_VS_STATE_MASTER)
96534 ip_vs_sync_conn(net, cp, pkts);
96535 diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
96536 index a3df9bd..895ae09 100644
96537 --- a/net/netfilter/ipvs/ip_vs_ctl.c
96538 +++ b/net/netfilter/ipvs/ip_vs_ctl.c
96539 @@ -794,7 +794,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
96540 */
96541 ip_vs_rs_hash(ipvs, dest);
96542 }
96543 - atomic_set(&dest->conn_flags, conn_flags);
96544 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
96545
96546 /* bind the service */
96547 old_svc = rcu_dereference_protected(dest->svc, 1);
96548 @@ -1641,7 +1641,7 @@ proc_do_sync_ports(struct ctl_table *table, int write,
96549 * align with netns init in ip_vs_control_net_init()
96550 */
96551
96552 -static struct ctl_table vs_vars[] = {
96553 +static ctl_table_no_const vs_vars[] __read_only = {
96554 {
96555 .procname = "amemthresh",
96556 .maxlen = sizeof(int),
96557 @@ -2062,7 +2062,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
96558 " %-7s %-6d %-10d %-10d\n",
96559 &dest->addr.in6,
96560 ntohs(dest->port),
96561 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
96562 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
96563 atomic_read(&dest->weight),
96564 atomic_read(&dest->activeconns),
96565 atomic_read(&dest->inactconns));
96566 @@ -2073,7 +2073,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
96567 "%-7s %-6d %-10d %-10d\n",
96568 ntohl(dest->addr.ip),
96569 ntohs(dest->port),
96570 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
96571 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
96572 atomic_read(&dest->weight),
96573 atomic_read(&dest->activeconns),
96574 atomic_read(&dest->inactconns));
96575 @@ -2551,7 +2551,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
96576
96577 entry.addr = dest->addr.ip;
96578 entry.port = dest->port;
96579 - entry.conn_flags = atomic_read(&dest->conn_flags);
96580 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
96581 entry.weight = atomic_read(&dest->weight);
96582 entry.u_threshold = dest->u_threshold;
96583 entry.l_threshold = dest->l_threshold;
96584 @@ -3094,7 +3094,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
96585 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
96586 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
96587 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
96588 - (atomic_read(&dest->conn_flags) &
96589 + (atomic_read_unchecked(&dest->conn_flags) &
96590 IP_VS_CONN_F_FWD_MASK)) ||
96591 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
96592 atomic_read(&dest->weight)) ||
96593 @@ -3684,7 +3684,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
96594 {
96595 int idx;
96596 struct netns_ipvs *ipvs = net_ipvs(net);
96597 - struct ctl_table *tbl;
96598 + ctl_table_no_const *tbl;
96599
96600 atomic_set(&ipvs->dropentry, 0);
96601 spin_lock_init(&ipvs->dropentry_lock);
96602 diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
96603 index eff13c9..c1aab3e 100644
96604 --- a/net/netfilter/ipvs/ip_vs_lblc.c
96605 +++ b/net/netfilter/ipvs/ip_vs_lblc.c
96606 @@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
96607 * IPVS LBLC sysctl table
96608 */
96609 #ifdef CONFIG_SYSCTL
96610 -static struct ctl_table vs_vars_table[] = {
96611 +static ctl_table_no_const vs_vars_table[] __read_only = {
96612 {
96613 .procname = "lblc_expiration",
96614 .data = NULL,
96615 diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
96616 index 0b85500..8513fa5 100644
96617 --- a/net/netfilter/ipvs/ip_vs_lblcr.c
96618 +++ b/net/netfilter/ipvs/ip_vs_lblcr.c
96619 @@ -289,7 +289,7 @@ struct ip_vs_lblcr_table {
96620 * IPVS LBLCR sysctl table
96621 */
96622
96623 -static struct ctl_table vs_vars_table[] = {
96624 +static ctl_table_no_const vs_vars_table[] __read_only = {
96625 {
96626 .procname = "lblcr_expiration",
96627 .data = NULL,
96628 diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
96629 index f448471..995f131 100644
96630 --- a/net/netfilter/ipvs/ip_vs_sync.c
96631 +++ b/net/netfilter/ipvs/ip_vs_sync.c
96632 @@ -609,7 +609,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
96633 cp = cp->control;
96634 if (cp) {
96635 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
96636 - pkts = atomic_add_return(1, &cp->in_pkts);
96637 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
96638 else
96639 pkts = sysctl_sync_threshold(ipvs);
96640 ip_vs_sync_conn(net, cp->control, pkts);
96641 @@ -771,7 +771,7 @@ control:
96642 if (!cp)
96643 return;
96644 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
96645 - pkts = atomic_add_return(1, &cp->in_pkts);
96646 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
96647 else
96648 pkts = sysctl_sync_threshold(ipvs);
96649 goto sloop;
96650 @@ -895,7 +895,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
96651
96652 if (opt)
96653 memcpy(&cp->in_seq, opt, sizeof(*opt));
96654 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
96655 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
96656 cp->state = state;
96657 cp->old_state = cp->state;
96658 /*
96659 diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
96660 index c47444e..b0961c6 100644
96661 --- a/net/netfilter/ipvs/ip_vs_xmit.c
96662 +++ b/net/netfilter/ipvs/ip_vs_xmit.c
96663 @@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
96664 else
96665 rc = NF_ACCEPT;
96666 /* do not touch skb anymore */
96667 - atomic_inc(&cp->in_pkts);
96668 + atomic_inc_unchecked(&cp->in_pkts);
96669 goto out;
96670 }
96671
96672 @@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
96673 else
96674 rc = NF_ACCEPT;
96675 /* do not touch skb anymore */
96676 - atomic_inc(&cp->in_pkts);
96677 + atomic_inc_unchecked(&cp->in_pkts);
96678 goto out;
96679 }
96680
96681 diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
96682 index 2d3030a..7ba1c0a 100644
96683 --- a/net/netfilter/nf_conntrack_acct.c
96684 +++ b/net/netfilter/nf_conntrack_acct.c
96685 @@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
96686 #ifdef CONFIG_SYSCTL
96687 static int nf_conntrack_acct_init_sysctl(struct net *net)
96688 {
96689 - struct ctl_table *table;
96690 + ctl_table_no_const *table;
96691
96692 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
96693 GFP_KERNEL);
96694 diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
96695 index 5d892fe..d2fc9d8 100644
96696 --- a/net/netfilter/nf_conntrack_core.c
96697 +++ b/net/netfilter/nf_conntrack_core.c
96698 @@ -1600,6 +1600,10 @@ void nf_conntrack_init_end(void)
96699 #define DYING_NULLS_VAL ((1<<30)+1)
96700 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
96701
96702 +#ifdef CONFIG_GRKERNSEC_HIDESYM
96703 +static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
96704 +#endif
96705 +
96706 int nf_conntrack_init_net(struct net *net)
96707 {
96708 int ret;
96709 @@ -1614,7 +1618,11 @@ int nf_conntrack_init_net(struct net *net)
96710 goto err_stat;
96711 }
96712
96713 +#ifdef CONFIG_GRKERNSEC_HIDESYM
96714 + net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
96715 +#else
96716 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
96717 +#endif
96718 if (!net->ct.slabname) {
96719 ret = -ENOMEM;
96720 goto err_slabname;
96721 diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
96722 index 1df1761..ce8b88a 100644
96723 --- a/net/netfilter/nf_conntrack_ecache.c
96724 +++ b/net/netfilter/nf_conntrack_ecache.c
96725 @@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
96726 #ifdef CONFIG_SYSCTL
96727 static int nf_conntrack_event_init_sysctl(struct net *net)
96728 {
96729 - struct ctl_table *table;
96730 + ctl_table_no_const *table;
96731
96732 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
96733 GFP_KERNEL);
96734 diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
96735 index 974a2a4..52cc6ff 100644
96736 --- a/net/netfilter/nf_conntrack_helper.c
96737 +++ b/net/netfilter/nf_conntrack_helper.c
96738 @@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
96739
96740 static int nf_conntrack_helper_init_sysctl(struct net *net)
96741 {
96742 - struct ctl_table *table;
96743 + ctl_table_no_const *table;
96744
96745 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
96746 GFP_KERNEL);
96747 diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
96748 index ce30041..3861b5d 100644
96749 --- a/net/netfilter/nf_conntrack_proto.c
96750 +++ b/net/netfilter/nf_conntrack_proto.c
96751 @@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
96752
96753 static void
96754 nf_ct_unregister_sysctl(struct ctl_table_header **header,
96755 - struct ctl_table **table,
96756 + ctl_table_no_const **table,
96757 unsigned int users)
96758 {
96759 if (users > 0)
96760 diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
96761 index a99b6c3..cb372f9 100644
96762 --- a/net/netfilter/nf_conntrack_proto_dccp.c
96763 +++ b/net/netfilter/nf_conntrack_proto_dccp.c
96764 @@ -428,7 +428,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
96765 const char *msg;
96766 u_int8_t state;
96767
96768 - dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
96769 + dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
96770 BUG_ON(dh == NULL);
96771
96772 state = dccp_state_table[CT_DCCP_ROLE_CLIENT][dh->dccph_type][CT_DCCP_NONE];
96773 @@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
96774 out_invalid:
96775 if (LOG_INVALID(net, IPPROTO_DCCP))
96776 nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
96777 - NULL, msg);
96778 + NULL, "%s", msg);
96779 return false;
96780 }
96781
96782 @@ -486,7 +486,7 @@ static int dccp_packet(struct nf_conn *ct, const struct sk_buff *skb,
96783 u_int8_t type, old_state, new_state;
96784 enum ct_dccp_roles role;
96785
96786 - dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
96787 + dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
96788 BUG_ON(dh == NULL);
96789 type = dh->dccph_type;
96790
96791 @@ -577,7 +577,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
96792 unsigned int cscov;
96793 const char *msg;
96794
96795 - dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &dh);
96796 + dh = skb_header_pointer(skb, dataoff, sizeof(_dh), &_dh);
96797 if (dh == NULL) {
96798 msg = "nf_ct_dccp: short packet ";
96799 goto out_invalid;
96800 @@ -614,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
96801
96802 out_invalid:
96803 if (LOG_INVALID(net, IPPROTO_DCCP))
96804 - nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
96805 + nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
96806 return -NF_ACCEPT;
96807 }
96808
96809 diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
96810 index f641751..d3c5b51 100644
96811 --- a/net/netfilter/nf_conntrack_standalone.c
96812 +++ b/net/netfilter/nf_conntrack_standalone.c
96813 @@ -471,7 +471,7 @@ static struct ctl_table nf_ct_netfilter_table[] = {
96814
96815 static int nf_conntrack_standalone_init_sysctl(struct net *net)
96816 {
96817 - struct ctl_table *table;
96818 + ctl_table_no_const *table;
96819
96820 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
96821 GFP_KERNEL);
96822 diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
96823 index 902fb0a..87f7fdb 100644
96824 --- a/net/netfilter/nf_conntrack_timestamp.c
96825 +++ b/net/netfilter/nf_conntrack_timestamp.c
96826 @@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
96827 #ifdef CONFIG_SYSCTL
96828 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
96829 {
96830 - struct ctl_table *table;
96831 + ctl_table_no_const *table;
96832
96833 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
96834 GFP_KERNEL);
96835 diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
96836 index 85296d4..8becdec 100644
96837 --- a/net/netfilter/nf_log.c
96838 +++ b/net/netfilter/nf_log.c
96839 @@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
96840
96841 #ifdef CONFIG_SYSCTL
96842 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
96843 -static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
96844 +static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
96845
96846 static int nf_log_proc_dostring(struct ctl_table *table, int write,
96847 void __user *buffer, size_t *lenp, loff_t *ppos)
96848 @@ -274,14 +274,16 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
96849 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
96850 mutex_unlock(&nf_log_mutex);
96851 } else {
96852 + ctl_table_no_const nf_log_table = *table;
96853 +
96854 mutex_lock(&nf_log_mutex);
96855 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
96856 lockdep_is_held(&nf_log_mutex));
96857 if (!logger)
96858 - table->data = "NONE";
96859 + nf_log_table.data = "NONE";
96860 else
96861 - table->data = logger->name;
96862 - r = proc_dostring(table, write, buffer, lenp, ppos);
96863 + nf_log_table.data = logger->name;
96864 + r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
96865 mutex_unlock(&nf_log_mutex);
96866 }
96867
96868 diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
96869 index f042ae5..30ea486 100644
96870 --- a/net/netfilter/nf_sockopt.c
96871 +++ b/net/netfilter/nf_sockopt.c
96872 @@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
96873 }
96874 }
96875
96876 - list_add(&reg->list, &nf_sockopts);
96877 + pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
96878 out:
96879 mutex_unlock(&nf_sockopt_mutex);
96880 return ret;
96881 @@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
96882 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
96883 {
96884 mutex_lock(&nf_sockopt_mutex);
96885 - list_del(&reg->list);
96886 + pax_list_del((struct list_head *)&reg->list);
96887 mutex_unlock(&nf_sockopt_mutex);
96888 }
96889 EXPORT_SYMBOL(nf_unregister_sockopt);
96890 diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
96891 index d92cc31..e46f350 100644
96892 --- a/net/netfilter/nfnetlink_log.c
96893 +++ b/net/netfilter/nfnetlink_log.c
96894 @@ -82,7 +82,7 @@ static int nfnl_log_net_id __read_mostly;
96895 struct nfnl_log_net {
96896 spinlock_t instances_lock;
96897 struct hlist_head instance_table[INSTANCE_BUCKETS];
96898 - atomic_t global_seq;
96899 + atomic_unchecked_t global_seq;
96900 };
96901
96902 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
96903 @@ -563,7 +563,7 @@ __build_packet_message(struct nfnl_log_net *log,
96904 /* global sequence number */
96905 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
96906 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
96907 - htonl(atomic_inc_return(&log->global_seq))))
96908 + htonl(atomic_inc_return_unchecked(&log->global_seq))))
96909 goto nla_put_failure;
96910
96911 if (data_len) {
96912 diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
96913 new file mode 100644
96914 index 0000000..c566332
96915 --- /dev/null
96916 +++ b/net/netfilter/xt_gradm.c
96917 @@ -0,0 +1,51 @@
96918 +/*
96919 + * gradm match for netfilter
96920 + * Copyright © Zbigniew Krzystolik, 2010
96921 + *
96922 + * This program is free software; you can redistribute it and/or modify
96923 + * it under the terms of the GNU General Public License; either version
96924 + * 2 or 3 as published by the Free Software Foundation.
96925 + */
96926 +#include <linux/module.h>
96927 +#include <linux/moduleparam.h>
96928 +#include <linux/skbuff.h>
96929 +#include <linux/netfilter/x_tables.h>
96930 +#include <linux/grsecurity.h>
96931 +#include <linux/netfilter/xt_gradm.h>
96932 +
96933 +static bool
96934 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
96935 +{
96936 + const struct xt_gradm_mtinfo *info = par->matchinfo;
96937 + bool retval = false;
96938 + if (gr_acl_is_enabled())
96939 + retval = true;
96940 + return retval ^ info->invflags;
96941 +}
96942 +
96943 +static struct xt_match gradm_mt_reg __read_mostly = {
96944 + .name = "gradm",
96945 + .revision = 0,
96946 + .family = NFPROTO_UNSPEC,
96947 + .match = gradm_mt,
96948 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
96949 + .me = THIS_MODULE,
96950 +};
96951 +
96952 +static int __init gradm_mt_init(void)
96953 +{
96954 + return xt_register_match(&gradm_mt_reg);
96955 +}
96956 +
96957 +static void __exit gradm_mt_exit(void)
96958 +{
96959 + xt_unregister_match(&gradm_mt_reg);
96960 +}
96961 +
96962 +module_init(gradm_mt_init);
96963 +module_exit(gradm_mt_exit);
96964 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
96965 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
96966 +MODULE_LICENSE("GPL");
96967 +MODULE_ALIAS("ipt_gradm");
96968 +MODULE_ALIAS("ip6t_gradm");
96969 diff --git a/net/netfilter/xt_statistic.c b/net/netfilter/xt_statistic.c
96970 index 4fe4fb4..87a89e5 100644
96971 --- a/net/netfilter/xt_statistic.c
96972 +++ b/net/netfilter/xt_statistic.c
96973 @@ -19,7 +19,7 @@
96974 #include <linux/module.h>
96975
96976 struct xt_statistic_priv {
96977 - atomic_t count;
96978 + atomic_unchecked_t count;
96979 } ____cacheline_aligned_in_smp;
96980
96981 MODULE_LICENSE("GPL");
96982 @@ -42,9 +42,9 @@ statistic_mt(const struct sk_buff *skb, struct xt_action_param *par)
96983 break;
96984 case XT_STATISTIC_MODE_NTH:
96985 do {
96986 - oval = atomic_read(&info->master->count);
96987 + oval = atomic_read_unchecked(&info->master->count);
96988 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
96989 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
96990 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
96991 if (nval == 0)
96992 ret = !ret;
96993 break;
96994 @@ -64,7 +64,7 @@ static int statistic_mt_check(const struct xt_mtchk_param *par)
96995 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
96996 if (info->master == NULL)
96997 return -ENOMEM;
96998 - atomic_set(&info->master->count, info->u.nth.count);
96999 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
97000
97001 return 0;
97002 }
97003 diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
97004 index 6135635..5bdc54a 100644
97005 --- a/net/netlink/af_netlink.c
97006 +++ b/net/netlink/af_netlink.c
97007 @@ -249,7 +249,7 @@ static void netlink_overrun(struct sock *sk)
97008 sk->sk_error_report(sk);
97009 }
97010 }
97011 - atomic_inc(&sk->sk_drops);
97012 + atomic_inc_unchecked(&sk->sk_drops);
97013 }
97014
97015 static void netlink_rcv_wake(struct sock *sk)
97016 @@ -2940,7 +2940,7 @@ static int netlink_seq_show(struct seq_file *seq, void *v)
97017 sk_wmem_alloc_get(s),
97018 nlk->cb_running,
97019 atomic_read(&s->sk_refcnt),
97020 - atomic_read(&s->sk_drops),
97021 + atomic_read_unchecked(&s->sk_drops),
97022 sock_i_ino(s)
97023 );
97024
97025 diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
97026 index 0c741ce..f8c8ca7 100644
97027 --- a/net/netlink/genetlink.c
97028 +++ b/net/netlink/genetlink.c
97029 @@ -310,18 +310,20 @@ int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
97030 goto errout;
97031 }
97032
97033 + pax_open_kernel();
97034 if (ops->dumpit)
97035 - ops->flags |= GENL_CMD_CAP_DUMP;
97036 + *(unsigned int *)&ops->flags |= GENL_CMD_CAP_DUMP;
97037 if (ops->doit)
97038 - ops->flags |= GENL_CMD_CAP_DO;
97039 + *(unsigned int *)&ops->flags |= GENL_CMD_CAP_DO;
97040 if (ops->policy)
97041 - ops->flags |= GENL_CMD_CAP_HASPOL;
97042 + *(unsigned int *)&ops->flags |= GENL_CMD_CAP_HASPOL;
97043 + pax_close_kernel();
97044
97045 genl_lock_all();
97046 - list_add_tail(&ops->ops_list, &family->ops_list);
97047 + pax_list_add_tail((struct list_head *)&ops->ops_list, &family->ops_list);
97048 genl_unlock_all();
97049
97050 - genl_ctrl_event(CTRL_CMD_NEWOPS, ops);
97051 + genl_ctrl_event(CTRL_CMD_NEWOPS, (void *)ops);
97052 err = 0;
97053 errout:
97054 return err;
97055 @@ -351,9 +353,9 @@ int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
97056 genl_lock_all();
97057 list_for_each_entry(rc, &family->ops_list, ops_list) {
97058 if (rc == ops) {
97059 - list_del(&ops->ops_list);
97060 + pax_list_del((struct list_head *)&ops->ops_list);
97061 genl_unlock_all();
97062 - genl_ctrl_event(CTRL_CMD_DELOPS, ops);
97063 + genl_ctrl_event(CTRL_CMD_DELOPS, (void *)ops);
97064 return 0;
97065 }
97066 }
97067 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
97068 index 53c19a3..b0ac04a 100644
97069 --- a/net/netrom/af_netrom.c
97070 +++ b/net/netrom/af_netrom.c
97071 @@ -850,7 +850,6 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
97072 *uaddr_len = sizeof(struct full_sockaddr_ax25);
97073 } else {
97074 sax->fsa_ax25.sax25_family = AF_NETROM;
97075 - sax->fsa_ax25.sax25_ndigis = 0;
97076 sax->fsa_ax25.sax25_call = nr->source_addr;
97077 *uaddr_len = sizeof(struct sockaddr_ax25);
97078 }
97079 diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
97080 index ba2548b..1a4e98e 100644
97081 --- a/net/packet/af_packet.c
97082 +++ b/net/packet/af_packet.c
97083 @@ -1699,7 +1699,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
97084
97085 spin_lock(&sk->sk_receive_queue.lock);
97086 po->stats.stats1.tp_packets++;
97087 - skb->dropcount = atomic_read(&sk->sk_drops);
97088 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
97089 __skb_queue_tail(&sk->sk_receive_queue, skb);
97090 spin_unlock(&sk->sk_receive_queue.lock);
97091 sk->sk_data_ready(sk, skb->len);
97092 @@ -1708,7 +1708,7 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
97093 drop_n_acct:
97094 spin_lock(&sk->sk_receive_queue.lock);
97095 po->stats.stats1.tp_drops++;
97096 - atomic_inc(&sk->sk_drops);
97097 + atomic_inc_unchecked(&sk->sk_drops);
97098 spin_unlock(&sk->sk_receive_queue.lock);
97099
97100 drop_n_restore:
97101 @@ -3261,7 +3261,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
97102 case PACKET_HDRLEN:
97103 if (len > sizeof(int))
97104 len = sizeof(int);
97105 - if (copy_from_user(&val, optval, len))
97106 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
97107 return -EFAULT;
97108 switch (val) {
97109 case TPACKET_V1:
97110 @@ -3304,7 +3304,7 @@ static int packet_getsockopt(struct socket *sock, int level, int optname,
97111 len = lv;
97112 if (put_user(len, optlen))
97113 return -EFAULT;
97114 - if (copy_to_user(optval, data, len))
97115 + if (len > sizeof(st) || copy_to_user(optval, data, len))
97116 return -EFAULT;
97117 return 0;
97118 }
97119 diff --git a/net/phonet/pep.c b/net/phonet/pep.c
97120 index e774117..900b8b7 100644
97121 --- a/net/phonet/pep.c
97122 +++ b/net/phonet/pep.c
97123 @@ -388,7 +388,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
97124
97125 case PNS_PEP_CTRL_REQ:
97126 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
97127 - atomic_inc(&sk->sk_drops);
97128 + atomic_inc_unchecked(&sk->sk_drops);
97129 break;
97130 }
97131 __skb_pull(skb, 4);
97132 @@ -409,7 +409,7 @@ static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
97133 }
97134
97135 if (pn->rx_credits == 0) {
97136 - atomic_inc(&sk->sk_drops);
97137 + atomic_inc_unchecked(&sk->sk_drops);
97138 err = -ENOBUFS;
97139 break;
97140 }
97141 @@ -580,7 +580,7 @@ static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
97142 }
97143
97144 if (pn->rx_credits == 0) {
97145 - atomic_inc(&sk->sk_drops);
97146 + atomic_inc_unchecked(&sk->sk_drops);
97147 err = NET_RX_DROP;
97148 break;
97149 }
97150 diff --git a/net/phonet/socket.c b/net/phonet/socket.c
97151 index 77e38f7..f4ef5c3 100644
97152 --- a/net/phonet/socket.c
97153 +++ b/net/phonet/socket.c
97154 @@ -612,7 +612,7 @@ static int pn_sock_seq_show(struct seq_file *seq, void *v)
97155 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
97156 sock_i_ino(sk),
97157 atomic_read(&sk->sk_refcnt), sk,
97158 - atomic_read(&sk->sk_drops), &len);
97159 + atomic_read_unchecked(&sk->sk_drops), &len);
97160 }
97161 seq_printf(seq, "%*s\n", 127 - len, "");
97162 return 0;
97163 diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
97164 index c02a8c4..3c5b600 100644
97165 --- a/net/phonet/sysctl.c
97166 +++ b/net/phonet/sysctl.c
97167 @@ -67,7 +67,7 @@ static int proc_local_port_range(struct ctl_table *table, int write,
97168 {
97169 int ret;
97170 int range[2] = {local_port_range[0], local_port_range[1]};
97171 - struct ctl_table tmp = {
97172 + ctl_table_no_const tmp = {
97173 .data = &range,
97174 .maxlen = sizeof(range),
97175 .mode = table->mode,
97176 diff --git a/net/rds/cong.c b/net/rds/cong.c
97177 index e5b65ac..f3b6fb7 100644
97178 --- a/net/rds/cong.c
97179 +++ b/net/rds/cong.c
97180 @@ -78,7 +78,7 @@
97181 * finds that the saved generation number is smaller than the global generation
97182 * number, it wakes up the process.
97183 */
97184 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
97185 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
97186
97187 /*
97188 * Congestion monitoring
97189 @@ -233,7 +233,7 @@ void rds_cong_map_updated(struct rds_cong_map *map, uint64_t portmask)
97190 rdsdebug("waking map %p for %pI4\n",
97191 map, &map->m_addr);
97192 rds_stats_inc(s_cong_update_received);
97193 - atomic_inc(&rds_cong_generation);
97194 + atomic_inc_unchecked(&rds_cong_generation);
97195 if (waitqueue_active(&map->m_waitq))
97196 wake_up(&map->m_waitq);
97197 if (waitqueue_active(&rds_poll_waitq))
97198 @@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
97199
97200 int rds_cong_updated_since(unsigned long *recent)
97201 {
97202 - unsigned long gen = atomic_read(&rds_cong_generation);
97203 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
97204
97205 if (likely(*recent == gen))
97206 return 0;
97207 diff --git a/net/rds/ib.h b/net/rds/ib.h
97208 index 7280ab8..e04f4ea 100644
97209 --- a/net/rds/ib.h
97210 +++ b/net/rds/ib.h
97211 @@ -128,7 +128,7 @@ struct rds_ib_connection {
97212 /* sending acks */
97213 unsigned long i_ack_flags;
97214 #ifdef KERNEL_HAS_ATOMIC64
97215 - atomic64_t i_ack_next; /* next ACK to send */
97216 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
97217 #else
97218 spinlock_t i_ack_lock; /* protect i_ack_next */
97219 u64 i_ack_next; /* next ACK to send */
97220 diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c
97221 index 31b74f5..dc1fbfa 100644
97222 --- a/net/rds/ib_cm.c
97223 +++ b/net/rds/ib_cm.c
97224 @@ -717,7 +717,7 @@ void rds_ib_conn_shutdown(struct rds_connection *conn)
97225 /* Clear the ACK state */
97226 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
97227 #ifdef KERNEL_HAS_ATOMIC64
97228 - atomic64_set(&ic->i_ack_next, 0);
97229 + atomic64_set_unchecked(&ic->i_ack_next, 0);
97230 #else
97231 ic->i_ack_next = 0;
97232 #endif
97233 diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
97234 index 8eb9501..0c386ff 100644
97235 --- a/net/rds/ib_recv.c
97236 +++ b/net/rds/ib_recv.c
97237 @@ -597,7 +597,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
97238 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
97239 int ack_required)
97240 {
97241 - atomic64_set(&ic->i_ack_next, seq);
97242 + atomic64_set_unchecked(&ic->i_ack_next, seq);
97243 if (ack_required) {
97244 smp_mb__before_clear_bit();
97245 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
97246 @@ -609,7 +609,7 @@ static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
97247 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
97248 smp_mb__after_clear_bit();
97249
97250 - return atomic64_read(&ic->i_ack_next);
97251 + return atomic64_read_unchecked(&ic->i_ack_next);
97252 }
97253 #endif
97254
97255 diff --git a/net/rds/iw.h b/net/rds/iw.h
97256 index 04ce3b1..48119a6 100644
97257 --- a/net/rds/iw.h
97258 +++ b/net/rds/iw.h
97259 @@ -134,7 +134,7 @@ struct rds_iw_connection {
97260 /* sending acks */
97261 unsigned long i_ack_flags;
97262 #ifdef KERNEL_HAS_ATOMIC64
97263 - atomic64_t i_ack_next; /* next ACK to send */
97264 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
97265 #else
97266 spinlock_t i_ack_lock; /* protect i_ack_next */
97267 u64 i_ack_next; /* next ACK to send */
97268 diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c
97269 index a91e1db..cf3053f 100644
97270 --- a/net/rds/iw_cm.c
97271 +++ b/net/rds/iw_cm.c
97272 @@ -663,7 +663,7 @@ void rds_iw_conn_shutdown(struct rds_connection *conn)
97273 /* Clear the ACK state */
97274 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
97275 #ifdef KERNEL_HAS_ATOMIC64
97276 - atomic64_set(&ic->i_ack_next, 0);
97277 + atomic64_set_unchecked(&ic->i_ack_next, 0);
97278 #else
97279 ic->i_ack_next = 0;
97280 #endif
97281 diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
97282 index 4503335..db566b4 100644
97283 --- a/net/rds/iw_recv.c
97284 +++ b/net/rds/iw_recv.c
97285 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
97286 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
97287 int ack_required)
97288 {
97289 - atomic64_set(&ic->i_ack_next, seq);
97290 + atomic64_set_unchecked(&ic->i_ack_next, seq);
97291 if (ack_required) {
97292 smp_mb__before_clear_bit();
97293 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
97294 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_connection *ic)
97295 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
97296 smp_mb__after_clear_bit();
97297
97298 - return atomic64_read(&ic->i_ack_next);
97299 + return atomic64_read_unchecked(&ic->i_ack_next);
97300 }
97301 #endif
97302
97303 diff --git a/net/rds/rds.h b/net/rds/rds.h
97304 index ec1d731..90a3a8d 100644
97305 --- a/net/rds/rds.h
97306 +++ b/net/rds/rds.h
97307 @@ -449,7 +449,7 @@ struct rds_transport {
97308 void (*sync_mr)(void *trans_private, int direction);
97309 void (*free_mr)(void *trans_private, int invalidate);
97310 void (*flush_mrs)(void);
97311 -};
97312 +} __do_const;
97313
97314 struct rds_sock {
97315 struct sock rs_sk;
97316 diff --git a/net/rds/tcp.c b/net/rds/tcp.c
97317 index edac9ef..16bcb98 100644
97318 --- a/net/rds/tcp.c
97319 +++ b/net/rds/tcp.c
97320 @@ -59,7 +59,7 @@ void rds_tcp_nonagle(struct socket *sock)
97321 int val = 1;
97322
97323 set_fs(KERNEL_DS);
97324 - sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
97325 + sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __force_user *)&val,
97326 sizeof(val));
97327 set_fs(oldfs);
97328 }
97329 diff --git a/net/rds/tcp_send.c b/net/rds/tcp_send.c
97330 index 81cf5a4..b5826ff 100644
97331 --- a/net/rds/tcp_send.c
97332 +++ b/net/rds/tcp_send.c
97333 @@ -43,7 +43,7 @@ static void rds_tcp_cork(struct socket *sock, int val)
97334
97335 oldfs = get_fs();
97336 set_fs(KERNEL_DS);
97337 - sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __user *)&val,
97338 + sock->ops->setsockopt(sock, SOL_TCP, TCP_CORK, (char __force_user *)&val,
97339 sizeof(val));
97340 set_fs(oldfs);
97341 }
97342 diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
97343 index e61aa60..f07cc89 100644
97344 --- a/net/rxrpc/af_rxrpc.c
97345 +++ b/net/rxrpc/af_rxrpc.c
97346 @@ -40,7 +40,7 @@ static const struct proto_ops rxrpc_rpc_ops;
97347 __be32 rxrpc_epoch;
97348
97349 /* current debugging ID */
97350 -atomic_t rxrpc_debug_id;
97351 +atomic_unchecked_t rxrpc_debug_id;
97352
97353 /* count of skbs currently in use */
97354 atomic_t rxrpc_n_skbs;
97355 diff --git a/net/rxrpc/ar-ack.c b/net/rxrpc/ar-ack.c
97356 index e4d9cbc..b229649 100644
97357 --- a/net/rxrpc/ar-ack.c
97358 +++ b/net/rxrpc/ar-ack.c
97359 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
97360
97361 _enter("{%d,%d,%d,%d},",
97362 call->acks_hard, call->acks_unacked,
97363 - atomic_read(&call->sequence),
97364 + atomic_read_unchecked(&call->sequence),
97365 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
97366
97367 stop = 0;
97368 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
97369
97370 /* each Tx packet has a new serial number */
97371 sp->hdr.serial =
97372 - htonl(atomic_inc_return(&call->conn->serial));
97373 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
97374
97375 hdr = (struct rxrpc_header *) txb->head;
97376 hdr->serial = sp->hdr.serial;
97377 @@ -403,7 +403,7 @@ static void rxrpc_rotate_tx_window(struct rxrpc_call *call, u32 hard)
97378 */
97379 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
97380 {
97381 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
97382 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
97383 }
97384
97385 /*
97386 @@ -629,7 +629,7 @@ process_further:
97387
97388 latest = ntohl(sp->hdr.serial);
97389 hard = ntohl(ack.firstPacket);
97390 - tx = atomic_read(&call->sequence);
97391 + tx = atomic_read_unchecked(&call->sequence);
97392
97393 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
97394 latest,
97395 @@ -1161,7 +1161,7 @@ void rxrpc_process_call(struct work_struct *work)
97396 goto maybe_reschedule;
97397
97398 send_ACK_with_skew:
97399 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
97400 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
97401 ntohl(ack.serial));
97402 send_ACK:
97403 mtu = call->conn->trans->peer->if_mtu;
97404 @@ -1173,7 +1173,7 @@ send_ACK:
97405 ackinfo.rxMTU = htonl(5692);
97406 ackinfo.jumbo_max = htonl(4);
97407
97408 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
97409 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
97410 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
97411 ntohl(hdr.serial),
97412 ntohs(ack.maxSkew),
97413 @@ -1191,7 +1191,7 @@ send_ACK:
97414 send_message:
97415 _debug("send message");
97416
97417 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
97418 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
97419 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
97420 send_message_2:
97421
97422 diff --git a/net/rxrpc/ar-call.c b/net/rxrpc/ar-call.c
97423 index a3bbb36..3341fb9 100644
97424 --- a/net/rxrpc/ar-call.c
97425 +++ b/net/rxrpc/ar-call.c
97426 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
97427 spin_lock_init(&call->lock);
97428 rwlock_init(&call->state_lock);
97429 atomic_set(&call->usage, 1);
97430 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
97431 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
97432 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
97433
97434 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
97435 diff --git a/net/rxrpc/ar-connection.c b/net/rxrpc/ar-connection.c
97436 index 4106ca9..a338d7a 100644
97437 --- a/net/rxrpc/ar-connection.c
97438 +++ b/net/rxrpc/ar-connection.c
97439 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
97440 rwlock_init(&conn->lock);
97441 spin_lock_init(&conn->state_lock);
97442 atomic_set(&conn->usage, 1);
97443 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
97444 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
97445 conn->avail_calls = RXRPC_MAXCALLS;
97446 conn->size_align = 4;
97447 conn->header_size = sizeof(struct rxrpc_header);
97448 diff --git a/net/rxrpc/ar-connevent.c b/net/rxrpc/ar-connevent.c
97449 index e7ed43a..6afa140 100644
97450 --- a/net/rxrpc/ar-connevent.c
97451 +++ b/net/rxrpc/ar-connevent.c
97452 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct rxrpc_connection *conn,
97453
97454 len = iov[0].iov_len + iov[1].iov_len;
97455
97456 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
97457 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
97458 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
97459
97460 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
97461 diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c
97462 index 529572f..c758ca7 100644
97463 --- a/net/rxrpc/ar-input.c
97464 +++ b/net/rxrpc/ar-input.c
97465 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb)
97466 /* track the latest serial number on this connection for ACK packet
97467 * information */
97468 serial = ntohl(sp->hdr.serial);
97469 - hi_serial = atomic_read(&call->conn->hi_serial);
97470 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
97471 while (serial > hi_serial)
97472 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
97473 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
97474 serial);
97475
97476 /* request ACK generation for any ACK or DATA packet that requests
97477 diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
97478 index a693aca..81e7293 100644
97479 --- a/net/rxrpc/ar-internal.h
97480 +++ b/net/rxrpc/ar-internal.h
97481 @@ -272,8 +272,8 @@ struct rxrpc_connection {
97482 int error; /* error code for local abort */
97483 int debug_id; /* debug ID for printks */
97484 unsigned int call_counter; /* call ID counter */
97485 - atomic_t serial; /* packet serial number counter */
97486 - atomic_t hi_serial; /* highest serial number received */
97487 + atomic_unchecked_t serial; /* packet serial number counter */
97488 + atomic_unchecked_t hi_serial; /* highest serial number received */
97489 u8 avail_calls; /* number of calls available */
97490 u8 size_align; /* data size alignment (for security) */
97491 u8 header_size; /* rxrpc + security header size */
97492 @@ -346,7 +346,7 @@ struct rxrpc_call {
97493 spinlock_t lock;
97494 rwlock_t state_lock; /* lock for state transition */
97495 atomic_t usage;
97496 - atomic_t sequence; /* Tx data packet sequence counter */
97497 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
97498 u32 abort_code; /* local/remote abort code */
97499 enum { /* current state of call */
97500 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
97501 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
97502 */
97503 extern atomic_t rxrpc_n_skbs;
97504 extern __be32 rxrpc_epoch;
97505 -extern atomic_t rxrpc_debug_id;
97506 +extern atomic_unchecked_t rxrpc_debug_id;
97507 extern struct workqueue_struct *rxrpc_workqueue;
97508
97509 /*
97510 diff --git a/net/rxrpc/ar-local.c b/net/rxrpc/ar-local.c
97511 index 87f7135..74d3703 100644
97512 --- a/net/rxrpc/ar-local.c
97513 +++ b/net/rxrpc/ar-local.c
97514 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(struct sockaddr_rxrpc *srx)
97515 spin_lock_init(&local->lock);
97516 rwlock_init(&local->services_lock);
97517 atomic_set(&local->usage, 1);
97518 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
97519 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
97520 memcpy(&local->srx, srx, sizeof(*srx));
97521 }
97522
97523 diff --git a/net/rxrpc/ar-output.c b/net/rxrpc/ar-output.c
97524 index e1ac183..b43e10e 100644
97525 --- a/net/rxrpc/ar-output.c
97526 +++ b/net/rxrpc/ar-output.c
97527 @@ -682,9 +682,9 @@ static int rxrpc_send_data(struct kiocb *iocb,
97528 sp->hdr.cid = call->cid;
97529 sp->hdr.callNumber = call->call_id;
97530 sp->hdr.seq =
97531 - htonl(atomic_inc_return(&call->sequence));
97532 + htonl(atomic_inc_return_unchecked(&call->sequence));
97533 sp->hdr.serial =
97534 - htonl(atomic_inc_return(&conn->serial));
97535 + htonl(atomic_inc_return_unchecked(&conn->serial));
97536 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
97537 sp->hdr.userStatus = 0;
97538 sp->hdr.securityIndex = conn->security_ix;
97539 diff --git a/net/rxrpc/ar-peer.c b/net/rxrpc/ar-peer.c
97540 index bebaa43..2644591 100644
97541 --- a/net/rxrpc/ar-peer.c
97542 +++ b/net/rxrpc/ar-peer.c
97543 @@ -72,7 +72,7 @@ static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
97544 INIT_LIST_HEAD(&peer->error_targets);
97545 spin_lock_init(&peer->lock);
97546 atomic_set(&peer->usage, 1);
97547 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
97548 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
97549 memcpy(&peer->srx, srx, sizeof(*srx));
97550
97551 rxrpc_assess_MTU_size(peer);
97552 diff --git a/net/rxrpc/ar-proc.c b/net/rxrpc/ar-proc.c
97553 index 38047f7..9f48511 100644
97554 --- a/net/rxrpc/ar-proc.c
97555 +++ b/net/rxrpc/ar-proc.c
97556 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
97557 atomic_read(&conn->usage),
97558 rxrpc_conn_states[conn->state],
97559 key_serial(conn->key),
97560 - atomic_read(&conn->serial),
97561 - atomic_read(&conn->hi_serial));
97562 + atomic_read_unchecked(&conn->serial),
97563 + atomic_read_unchecked(&conn->hi_serial));
97564
97565 return 0;
97566 }
97567 diff --git a/net/rxrpc/ar-transport.c b/net/rxrpc/ar-transport.c
97568 index 92df566..87ec1bf 100644
97569 --- a/net/rxrpc/ar-transport.c
97570 +++ b/net/rxrpc/ar-transport.c
97571 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_alloc_transport(struct rxrpc_local *local,
97572 spin_lock_init(&trans->client_lock);
97573 rwlock_init(&trans->conn_lock);
97574 atomic_set(&trans->usage, 1);
97575 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
97576 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
97577
97578 if (peer->srx.transport.family == AF_INET) {
97579 switch (peer->srx.transport_type) {
97580 diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
97581 index f226709..0e735a8 100644
97582 --- a/net/rxrpc/rxkad.c
97583 +++ b/net/rxrpc/rxkad.c
97584 @@ -610,7 +610,7 @@ static int rxkad_issue_challenge(struct rxrpc_connection *conn)
97585
97586 len = iov[0].iov_len + iov[1].iov_len;
97587
97588 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
97589 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
97590 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
97591
97592 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
97593 @@ -660,7 +660,7 @@ static int rxkad_send_response(struct rxrpc_connection *conn,
97594
97595 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
97596
97597 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
97598 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
97599 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
97600
97601 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
97602 diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
97603 index e7b2d4f..bb1efd0 100644
97604 --- a/net/sctp/ipv6.c
97605 +++ b/net/sctp/ipv6.c
97606 @@ -962,7 +962,7 @@ static const struct inet6_protocol sctpv6_protocol = {
97607 .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
97608 };
97609
97610 -static struct sctp_af sctp_af_inet6 = {
97611 +static struct sctp_af sctp_af_inet6 __read_only = {
97612 .sa_family = AF_INET6,
97613 .sctp_xmit = sctp_v6_xmit,
97614 .setsockopt = ipv6_setsockopt,
97615 @@ -994,7 +994,7 @@ static struct sctp_af sctp_af_inet6 = {
97616 #endif
97617 };
97618
97619 -static struct sctp_pf sctp_pf_inet6 = {
97620 +static struct sctp_pf sctp_pf_inet6 __read_only = {
97621 .event_msgname = sctp_inet6_event_msgname,
97622 .skb_msgname = sctp_inet6_skb_msgname,
97623 .af_supported = sctp_inet6_af_supported,
97624 @@ -1019,7 +1019,7 @@ void sctp_v6_pf_init(void)
97625
97626 void sctp_v6_pf_exit(void)
97627 {
97628 - list_del(&sctp_af_inet6.list);
97629 + pax_list_del(&sctp_af_inet6.list);
97630 }
97631
97632 /* Initialize IPv6 support and register with socket layer. */
97633 diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
97634 index 5e17092..19be1d4 100644
97635 --- a/net/sctp/protocol.c
97636 +++ b/net/sctp/protocol.c
97637 @@ -832,8 +832,10 @@ int sctp_register_af(struct sctp_af *af)
97638 return 0;
97639 }
97640
97641 + pax_open_kernel();
97642 INIT_LIST_HEAD(&af->list);
97643 - list_add_tail(&af->list, &sctp_address_families);
97644 + pax_close_kernel();
97645 + pax_list_add_tail(&af->list, &sctp_address_families);
97646 return 1;
97647 }
97648
97649 @@ -963,7 +965,7 @@ static inline int sctp_v4_xmit(struct sk_buff *skb,
97650
97651 static struct sctp_af sctp_af_inet;
97652
97653 -static struct sctp_pf sctp_pf_inet = {
97654 +static struct sctp_pf sctp_pf_inet __read_only = {
97655 .event_msgname = sctp_inet_event_msgname,
97656 .skb_msgname = sctp_inet_skb_msgname,
97657 .af_supported = sctp_inet_af_supported,
97658 @@ -1034,7 +1036,7 @@ static const struct net_protocol sctp_protocol = {
97659 };
97660
97661 /* IPv4 address related functions. */
97662 -static struct sctp_af sctp_af_inet = {
97663 +static struct sctp_af sctp_af_inet __read_only = {
97664 .sa_family = AF_INET,
97665 .sctp_xmit = sctp_v4_xmit,
97666 .setsockopt = ip_setsockopt,
97667 @@ -1119,7 +1121,7 @@ static void sctp_v4_pf_init(void)
97668
97669 static void sctp_v4_pf_exit(void)
97670 {
97671 - list_del(&sctp_af_inet.list);
97672 + pax_list_del(&sctp_af_inet.list);
97673 }
97674
97675 static int sctp_v4_protosw_init(void)
97676 diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
97677 index 1a6eef3..17e898f 100644
97678 --- a/net/sctp/sm_sideeffect.c
97679 +++ b/net/sctp/sm_sideeffect.c
97680 @@ -440,7 +440,7 @@ static void sctp_generate_sack_event(unsigned long data)
97681 sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK);
97682 }
97683
97684 -sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
97685 +sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
97686 NULL,
97687 sctp_generate_t1_cookie_event,
97688 sctp_generate_t1_init_event,
97689 diff --git a/net/sctp/socket.c b/net/sctp/socket.c
97690 index 911b71b..90ade0a 100644
97691 --- a/net/sctp/socket.c
97692 +++ b/net/sctp/socket.c
97693 @@ -2153,11 +2153,13 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
97694 {
97695 struct sctp_association *asoc;
97696 struct sctp_ulpevent *event;
97697 + struct sctp_event_subscribe subscribe;
97698
97699 if (optlen > sizeof(struct sctp_event_subscribe))
97700 return -EINVAL;
97701 - if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
97702 + if (copy_from_user(&subscribe, optval, optlen))
97703 return -EFAULT;
97704 + sctp_sk(sk)->subscribe = subscribe;
97705
97706 /*
97707 * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
97708 @@ -4213,13 +4215,16 @@ static int sctp_getsockopt_disable_fragments(struct sock *sk, int len,
97709 static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
97710 int __user *optlen)
97711 {
97712 + struct sctp_event_subscribe subscribe;
97713 +
97714 if (len <= 0)
97715 return -EINVAL;
97716 if (len > sizeof(struct sctp_event_subscribe))
97717 len = sizeof(struct sctp_event_subscribe);
97718 if (put_user(len, optlen))
97719 return -EFAULT;
97720 - if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len))
97721 + subscribe = sctp_sk(sk)->subscribe;
97722 + if (copy_to_user(optval, &subscribe, len))
97723 return -EFAULT;
97724 return 0;
97725 }
97726 @@ -4237,6 +4242,8 @@ static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval,
97727 */
97728 static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen)
97729 {
97730 + __u32 autoclose;
97731 +
97732 /* Applicable to UDP-style socket only */
97733 if (sctp_style(sk, TCP))
97734 return -EOPNOTSUPP;
97735 @@ -4245,7 +4252,8 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
97736 len = sizeof(int);
97737 if (put_user(len, optlen))
97738 return -EFAULT;
97739 - if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int)))
97740 + autoclose = sctp_sk(sk)->autoclose;
97741 + if (copy_to_user(optval, &autoclose, sizeof(int)))
97742 return -EFAULT;
97743 return 0;
97744 }
97745 @@ -4617,12 +4625,15 @@ static int sctp_getsockopt_delayed_ack(struct sock *sk, int len,
97746 */
97747 static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen)
97748 {
97749 + struct sctp_initmsg initmsg;
97750 +
97751 if (len < sizeof(struct sctp_initmsg))
97752 return -EINVAL;
97753 len = sizeof(struct sctp_initmsg);
97754 if (put_user(len, optlen))
97755 return -EFAULT;
97756 - if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len))
97757 + initmsg = sctp_sk(sk)->initmsg;
97758 + if (copy_to_user(optval, &initmsg, len))
97759 return -EFAULT;
97760 return 0;
97761 }
97762 @@ -4663,6 +4674,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
97763 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
97764 if (space_left < addrlen)
97765 return -ENOMEM;
97766 + if (addrlen > sizeof(temp) || addrlen < 0)
97767 + return -EFAULT;
97768 if (copy_to_user(to, &temp, addrlen))
97769 return -EFAULT;
97770 to += addrlen;
97771 diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
97772 index 6b36561..4f21064 100644
97773 --- a/net/sctp/sysctl.c
97774 +++ b/net/sctp/sysctl.c
97775 @@ -301,7 +301,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl,
97776 {
97777 struct net *net = current->nsproxy->net_ns;
97778 char tmp[8];
97779 - struct ctl_table tbl;
97780 + ctl_table_no_const tbl;
97781 int ret;
97782 int changed = 0;
97783 char *none = "none";
97784 @@ -344,7 +344,7 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl,
97785
97786 int sctp_sysctl_net_register(struct net *net)
97787 {
97788 - struct ctl_table *table;
97789 + ctl_table_no_const *table;
97790 int i;
97791
97792 table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
97793 diff --git a/net/socket.c b/net/socket.c
97794 index e83c416..9169305 100644
97795 --- a/net/socket.c
97796 +++ b/net/socket.c
97797 @@ -88,6 +88,7 @@
97798 #include <linux/magic.h>
97799 #include <linux/slab.h>
97800 #include <linux/xattr.h>
97801 +#include <linux/in.h>
97802
97803 #include <asm/uaccess.h>
97804 #include <asm/unistd.h>
97805 @@ -111,6 +112,8 @@ unsigned int sysctl_net_busy_read __read_mostly;
97806 unsigned int sysctl_net_busy_poll __read_mostly;
97807 #endif
97808
97809 +#include <linux/grsock.h>
97810 +
97811 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
97812 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
97813 unsigned long nr_segs, loff_t pos);
97814 @@ -162,7 +165,7 @@ static const struct file_operations socket_file_ops = {
97815 */
97816
97817 static DEFINE_SPINLOCK(net_family_lock);
97818 -static const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
97819 +const struct net_proto_family __rcu *net_families[NPROTO] __read_mostly;
97820
97821 /*
97822 * Statistics counters of the socket lists
97823 @@ -328,7 +331,7 @@ static struct dentry *sockfs_mount(struct file_system_type *fs_type,
97824 &sockfs_dentry_operations, SOCKFS_MAGIC);
97825 }
97826
97827 -static struct vfsmount *sock_mnt __read_mostly;
97828 +struct vfsmount *sock_mnt __read_mostly;
97829
97830 static struct file_system_type sock_fs_type = {
97831 .name = "sockfs",
97832 @@ -1255,6 +1258,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
97833 return -EAFNOSUPPORT;
97834 if (type < 0 || type >= SOCK_MAX)
97835 return -EINVAL;
97836 + if (protocol < 0)
97837 + return -EINVAL;
97838
97839 /* Compatibility.
97840
97841 @@ -1275,6 +1280,20 @@ int __sock_create(struct net *net, int family, int type, int protocol,
97842 if (err)
97843 return err;
97844
97845 + if(!kern && !gr_search_socket(family, type, protocol)) {
97846 + if (rcu_access_pointer(net_families[family]) == NULL)
97847 + return -EAFNOSUPPORT;
97848 + else
97849 + return -EACCES;
97850 + }
97851 +
97852 + if (!kern && gr_handle_sock_all(family, type, protocol)) {
97853 + if (rcu_access_pointer(net_families[family]) == NULL)
97854 + return -EAFNOSUPPORT;
97855 + else
97856 + return -EACCES;
97857 + }
97858 +
97859 /*
97860 * Allocate the socket and allow the family to set things up. if
97861 * the protocol is 0, the family is instructed to select an appropriate
97862 @@ -1513,6 +1532,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
97863 if (sock) {
97864 err = move_addr_to_kernel(umyaddr, addrlen, &address);
97865 if (err >= 0) {
97866 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
97867 + err = -EACCES;
97868 + goto error;
97869 + }
97870 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
97871 + if (err)
97872 + goto error;
97873 +
97874 err = security_socket_bind(sock,
97875 (struct sockaddr *)&address,
97876 addrlen);
97877 @@ -1521,6 +1548,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct sockaddr __user *, umyaddr, int, addrlen)
97878 (struct sockaddr *)
97879 &address, addrlen);
97880 }
97881 +error:
97882 fput_light(sock->file, fput_needed);
97883 }
97884 return err;
97885 @@ -1544,10 +1572,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, backlog)
97886 if ((unsigned int)backlog > somaxconn)
97887 backlog = somaxconn;
97888
97889 + if (gr_handle_sock_server_other(sock->sk)) {
97890 + err = -EPERM;
97891 + goto error;
97892 + }
97893 +
97894 + err = gr_search_listen(sock);
97895 + if (err)
97896 + goto error;
97897 +
97898 err = security_socket_listen(sock, backlog);
97899 if (!err)
97900 err = sock->ops->listen(sock, backlog);
97901
97902 +error:
97903 fput_light(sock->file, fput_needed);
97904 }
97905 return err;
97906 @@ -1591,6 +1629,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
97907 newsock->type = sock->type;
97908 newsock->ops = sock->ops;
97909
97910 + if (gr_handle_sock_server_other(sock->sk)) {
97911 + err = -EPERM;
97912 + sock_release(newsock);
97913 + goto out_put;
97914 + }
97915 +
97916 + err = gr_search_accept(sock);
97917 + if (err) {
97918 + sock_release(newsock);
97919 + goto out_put;
97920 + }
97921 +
97922 /*
97923 * We don't need try_module_get here, as the listening socket (sock)
97924 * has the protocol module (sock->ops->owner) held.
97925 @@ -1636,6 +1686,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
97926 fd_install(newfd, newfile);
97927 err = newfd;
97928
97929 + gr_attach_curr_ip(newsock->sk);
97930 +
97931 out_put:
97932 fput_light(sock->file, fput_needed);
97933 out:
97934 @@ -1668,6 +1720,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
97935 int, addrlen)
97936 {
97937 struct socket *sock;
97938 + struct sockaddr *sck;
97939 struct sockaddr_storage address;
97940 int err, fput_needed;
97941
97942 @@ -1678,6 +1731,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct sockaddr __user *, uservaddr,
97943 if (err < 0)
97944 goto out_put;
97945
97946 + sck = (struct sockaddr *)&address;
97947 +
97948 + if (gr_handle_sock_client(sck)) {
97949 + err = -EACCES;
97950 + goto out_put;
97951 + }
97952 +
97953 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
97954 + if (err)
97955 + goto out_put;
97956 +
97957 err =
97958 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
97959 if (err)
97960 @@ -1759,6 +1823,8 @@ SYSCALL_DEFINE3(getpeername, int, fd, struct sockaddr __user *, usockaddr,
97961 * the protocol.
97962 */
97963
97964 +asmlinkage long sys_sendto(int, void *, size_t, unsigned, struct sockaddr *, int);
97965 +
97966 SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
97967 unsigned int, flags, struct sockaddr __user *, addr,
97968 int, addr_len)
97969 @@ -1825,7 +1891,7 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
97970 struct socket *sock;
97971 struct iovec iov;
97972 struct msghdr msg;
97973 - struct sockaddr_storage address;
97974 + struct sockaddr_storage address = { };
97975 int err, err2;
97976 int fput_needed;
97977
97978 @@ -2047,7 +2113,7 @@ static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg,
97979 * checking falls down on this.
97980 */
97981 if (copy_from_user(ctl_buf,
97982 - (void __user __force *)msg_sys->msg_control,
97983 + (void __force_user *)msg_sys->msg_control,
97984 ctl_len))
97985 goto out_freectl;
97986 msg_sys->msg_control = ctl_buf;
97987 @@ -2198,7 +2264,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
97988 int err, total_len, len;
97989
97990 /* kernel mode address */
97991 - struct sockaddr_storage addr;
97992 + struct sockaddr_storage addr = { };
97993
97994 /* user mode address pointers */
97995 struct sockaddr __user *uaddr;
97996 @@ -2227,7 +2293,7 @@ static int ___sys_recvmsg(struct socket *sock, struct msghdr __user *msg,
97997 /* Save the user-mode address (verify_iovec will change the
97998 * kernel msghdr to use the kernel address space)
97999 */
98000 - uaddr = (__force void __user *)msg_sys->msg_name;
98001 + uaddr = (void __force_user *)msg_sys->msg_name;
98002 uaddr_len = COMPAT_NAMELEN(msg);
98003 if (MSG_CMSG_COMPAT & flags)
98004 err = verify_compat_iovec(msg_sys, iov, &addr, VERIFY_WRITE);
98005 @@ -2985,7 +3051,7 @@ static int bond_ioctl(struct net *net, unsigned int cmd,
98006 old_fs = get_fs();
98007 set_fs(KERNEL_DS);
98008 err = dev_ioctl(net, cmd,
98009 - (struct ifreq __user __force *) &kifr);
98010 + (struct ifreq __force_user *) &kifr);
98011 set_fs(old_fs);
98012
98013 return err;
98014 @@ -3094,7 +3160,7 @@ static int compat_sioc_ifmap(struct net *net, unsigned int cmd,
98015
98016 old_fs = get_fs();
98017 set_fs(KERNEL_DS);
98018 - err = dev_ioctl(net, cmd, (void __user __force *)&ifr);
98019 + err = dev_ioctl(net, cmd, (void __force_user *)&ifr);
98020 set_fs(old_fs);
98021
98022 if (cmd == SIOCGIFMAP && !err) {
98023 @@ -3199,7 +3265,7 @@ static int routing_ioctl(struct net *net, struct socket *sock,
98024 ret |= get_user(rtdev, &(ur4->rt_dev));
98025 if (rtdev) {
98026 ret |= copy_from_user(devname, compat_ptr(rtdev), 15);
98027 - r4.rt_dev = (char __user __force *)devname;
98028 + r4.rt_dev = (char __force_user *)devname;
98029 devname[15] = 0;
98030 } else
98031 r4.rt_dev = NULL;
98032 @@ -3425,8 +3491,8 @@ int kernel_getsockopt(struct socket *sock, int level, int optname,
98033 int __user *uoptlen;
98034 int err;
98035
98036 - uoptval = (char __user __force *) optval;
98037 - uoptlen = (int __user __force *) optlen;
98038 + uoptval = (char __force_user *) optval;
98039 + uoptlen = (int __force_user *) optlen;
98040
98041 set_fs(KERNEL_DS);
98042 if (level == SOL_SOCKET)
98043 @@ -3446,7 +3512,7 @@ int kernel_setsockopt(struct socket *sock, int level, int optname,
98044 char __user *uoptval;
98045 int err;
98046
98047 - uoptval = (char __user __force *) optval;
98048 + uoptval = (char __force_user *) optval;
98049
98050 set_fs(KERNEL_DS);
98051 if (level == SOL_SOCKET)
98052 diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
98053 index 09fb638..2e6a5c5 100644
98054 --- a/net/sunrpc/auth_gss/svcauth_gss.c
98055 +++ b/net/sunrpc/auth_gss/svcauth_gss.c
98056 @@ -1140,7 +1140,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
98057 uint64_t *handle)
98058 {
98059 struct rsc rsci, *rscp = NULL;
98060 - static atomic64_t ctxhctr;
98061 + static atomic64_unchecked_t ctxhctr = ATOMIC64_INIT(0);
98062 long long ctxh;
98063 struct gss_api_mech *gm = NULL;
98064 time_t expiry;
98065 @@ -1151,7 +1151,7 @@ static int gss_proxy_save_rsc(struct cache_detail *cd,
98066 status = -ENOMEM;
98067 /* the handle needs to be just a unique id,
98068 * use a static counter */
98069 - ctxh = atomic64_inc_return(&ctxhctr);
98070 + ctxh = atomic64_inc_return_unchecked(&ctxhctr);
98071
98072 /* make a copy for the caller */
98073 *handle = ctxh;
98074 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
98075 index 941d19f..c85ff07 100644
98076 --- a/net/sunrpc/clnt.c
98077 +++ b/net/sunrpc/clnt.c
98078 @@ -1319,7 +1319,9 @@ call_start(struct rpc_task *task)
98079 (RPC_IS_ASYNC(task) ? "async" : "sync"));
98080
98081 /* Increment call count */
98082 - task->tk_msg.rpc_proc->p_count++;
98083 + pax_open_kernel();
98084 + (*(unsigned int *)&task->tk_msg.rpc_proc->p_count)++;
98085 + pax_close_kernel();
98086 clnt->cl_stats->rpccnt++;
98087 task->tk_action = call_reserve;
98088 }
98089 diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
98090 index ff3cc4b..7612a9e 100644
98091 --- a/net/sunrpc/sched.c
98092 +++ b/net/sunrpc/sched.c
98093 @@ -261,9 +261,9 @@ static int rpc_wait_bit_killable(void *word)
98094 #if defined(RPC_DEBUG) || defined(RPC_TRACEPOINTS)
98095 static void rpc_task_set_debuginfo(struct rpc_task *task)
98096 {
98097 - static atomic_t rpc_pid;
98098 + static atomic_unchecked_t rpc_pid;
98099
98100 - task->tk_pid = atomic_inc_return(&rpc_pid);
98101 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
98102 }
98103 #else
98104 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
98105 diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
98106 index b974571..678ed90 100644
98107 --- a/net/sunrpc/svc.c
98108 +++ b/net/sunrpc/svc.c
98109 @@ -1160,7 +1160,9 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
98110 svc_putnl(resv, RPC_SUCCESS);
98111
98112 /* Bump per-procedure stats counter */
98113 - procp->pc_count++;
98114 + pax_open_kernel();
98115 + (*(unsigned int *)&procp->pc_count)++;
98116 + pax_close_kernel();
98117
98118 /* Initialize storage for argp and resp */
98119 memset(rqstp->rq_argp, 0, procp->pc_argsize);
98120 diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
98121 index 621ca7b..59421dd 100644
98122 --- a/net/sunrpc/svcauth_unix.c
98123 +++ b/net/sunrpc/svcauth_unix.c
98124 @@ -414,7 +414,7 @@ struct unix_gid {
98125 struct group_info *gi;
98126 };
98127
98128 -static int unix_gid_hash(kuid_t uid)
98129 +static int __intentional_overflow(-1) unix_gid_hash(kuid_t uid)
98130 {
98131 return hash_long(from_kuid(&init_user_ns, uid), GID_HASHBITS);
98132 }
98133 @@ -470,7 +470,7 @@ static void unix_gid_request(struct cache_detail *cd,
98134 (*bpp)[-1] = '\n';
98135 }
98136
98137 -static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid);
98138 +static struct unix_gid * __intentional_overflow(-1) unix_gid_lookup(struct cache_detail *cd, kuid_t uid);
98139
98140 static int unix_gid_parse(struct cache_detail *cd,
98141 char *mesg, int mlen)
98142 diff --git a/net/sunrpc/xprtrdma/svc_rdma.c b/net/sunrpc/xprtrdma/svc_rdma.c
98143 index c1b6270..05089c1 100644
98144 --- a/net/sunrpc/xprtrdma/svc_rdma.c
98145 +++ b/net/sunrpc/xprtrdma/svc_rdma.c
98146 @@ -62,15 +62,15 @@ unsigned int svcrdma_max_req_size = RPCRDMA_MAX_REQ_SIZE;
98147 static unsigned int min_max_inline = 4096;
98148 static unsigned int max_max_inline = 65536;
98149
98150 -atomic_t rdma_stat_recv;
98151 -atomic_t rdma_stat_read;
98152 -atomic_t rdma_stat_write;
98153 -atomic_t rdma_stat_sq_starve;
98154 -atomic_t rdma_stat_rq_starve;
98155 -atomic_t rdma_stat_rq_poll;
98156 -atomic_t rdma_stat_rq_prod;
98157 -atomic_t rdma_stat_sq_poll;
98158 -atomic_t rdma_stat_sq_prod;
98159 +atomic_unchecked_t rdma_stat_recv;
98160 +atomic_unchecked_t rdma_stat_read;
98161 +atomic_unchecked_t rdma_stat_write;
98162 +atomic_unchecked_t rdma_stat_sq_starve;
98163 +atomic_unchecked_t rdma_stat_rq_starve;
98164 +atomic_unchecked_t rdma_stat_rq_poll;
98165 +atomic_unchecked_t rdma_stat_rq_prod;
98166 +atomic_unchecked_t rdma_stat_sq_poll;
98167 +atomic_unchecked_t rdma_stat_sq_prod;
98168
98169 /* Temporary NFS request map and context caches */
98170 struct kmem_cache *svc_rdma_map_cachep;
98171 @@ -110,7 +110,7 @@ static int read_reset_stat(struct ctl_table *table, int write,
98172 len -= *ppos;
98173 if (len > *lenp)
98174 len = *lenp;
98175 - if (len && copy_to_user(buffer, str_buf, len))
98176 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
98177 return -EFAULT;
98178 *lenp = len;
98179 *ppos += len;
98180 @@ -151,63 +151,63 @@ static struct ctl_table svcrdma_parm_table[] = {
98181 {
98182 .procname = "rdma_stat_read",
98183 .data = &rdma_stat_read,
98184 - .maxlen = sizeof(atomic_t),
98185 + .maxlen = sizeof(atomic_unchecked_t),
98186 .mode = 0644,
98187 .proc_handler = read_reset_stat,
98188 },
98189 {
98190 .procname = "rdma_stat_recv",
98191 .data = &rdma_stat_recv,
98192 - .maxlen = sizeof(atomic_t),
98193 + .maxlen = sizeof(atomic_unchecked_t),
98194 .mode = 0644,
98195 .proc_handler = read_reset_stat,
98196 },
98197 {
98198 .procname = "rdma_stat_write",
98199 .data = &rdma_stat_write,
98200 - .maxlen = sizeof(atomic_t),
98201 + .maxlen = sizeof(atomic_unchecked_t),
98202 .mode = 0644,
98203 .proc_handler = read_reset_stat,
98204 },
98205 {
98206 .procname = "rdma_stat_sq_starve",
98207 .data = &rdma_stat_sq_starve,
98208 - .maxlen = sizeof(atomic_t),
98209 + .maxlen = sizeof(atomic_unchecked_t),
98210 .mode = 0644,
98211 .proc_handler = read_reset_stat,
98212 },
98213 {
98214 .procname = "rdma_stat_rq_starve",
98215 .data = &rdma_stat_rq_starve,
98216 - .maxlen = sizeof(atomic_t),
98217 + .maxlen = sizeof(atomic_unchecked_t),
98218 .mode = 0644,
98219 .proc_handler = read_reset_stat,
98220 },
98221 {
98222 .procname = "rdma_stat_rq_poll",
98223 .data = &rdma_stat_rq_poll,
98224 - .maxlen = sizeof(atomic_t),
98225 + .maxlen = sizeof(atomic_unchecked_t),
98226 .mode = 0644,
98227 .proc_handler = read_reset_stat,
98228 },
98229 {
98230 .procname = "rdma_stat_rq_prod",
98231 .data = &rdma_stat_rq_prod,
98232 - .maxlen = sizeof(atomic_t),
98233 + .maxlen = sizeof(atomic_unchecked_t),
98234 .mode = 0644,
98235 .proc_handler = read_reset_stat,
98236 },
98237 {
98238 .procname = "rdma_stat_sq_poll",
98239 .data = &rdma_stat_sq_poll,
98240 - .maxlen = sizeof(atomic_t),
98241 + .maxlen = sizeof(atomic_unchecked_t),
98242 .mode = 0644,
98243 .proc_handler = read_reset_stat,
98244 },
98245 {
98246 .procname = "rdma_stat_sq_prod",
98247 .data = &rdma_stat_sq_prod,
98248 - .maxlen = sizeof(atomic_t),
98249 + .maxlen = sizeof(atomic_unchecked_t),
98250 .mode = 0644,
98251 .proc_handler = read_reset_stat,
98252 },
98253 diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
98254 index 0ce7552..d074459 100644
98255 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
98256 +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
98257 @@ -501,7 +501,7 @@ next_sge:
98258 svc_rdma_put_context(ctxt, 0);
98259 goto out;
98260 }
98261 - atomic_inc(&rdma_stat_read);
98262 + atomic_inc_unchecked(&rdma_stat_read);
98263
98264 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
98265 chl_map->ch[ch_no].count -= read_wr.num_sge;
98266 @@ -611,7 +611,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
98267 dto_q);
98268 list_del_init(&ctxt->dto_q);
98269 } else {
98270 - atomic_inc(&rdma_stat_rq_starve);
98271 + atomic_inc_unchecked(&rdma_stat_rq_starve);
98272 clear_bit(XPT_DATA, &xprt->xpt_flags);
98273 ctxt = NULL;
98274 }
98275 @@ -631,7 +631,7 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
98276 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
98277 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
98278 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
98279 - atomic_inc(&rdma_stat_recv);
98280 + atomic_inc_unchecked(&rdma_stat_recv);
98281
98282 /* Build up the XDR from the receive buffers. */
98283 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
98284 diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
98285 index c1d124d..acfc59e 100644
98286 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
98287 +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
98288 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
98289 write_wr.wr.rdma.remote_addr = to;
98290
98291 /* Post It */
98292 - atomic_inc(&rdma_stat_write);
98293 + atomic_inc_unchecked(&rdma_stat_write);
98294 if (svc_rdma_send(xprt, &write_wr))
98295 goto err;
98296 return 0;
98297 diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
98298 index 62e4f9b..dd3f2d7 100644
98299 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
98300 +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
98301 @@ -292,7 +292,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
98302 return;
98303
98304 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
98305 - atomic_inc(&rdma_stat_rq_poll);
98306 + atomic_inc_unchecked(&rdma_stat_rq_poll);
98307
98308 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
98309 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
98310 @@ -314,7 +314,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
98311 }
98312
98313 if (ctxt)
98314 - atomic_inc(&rdma_stat_rq_prod);
98315 + atomic_inc_unchecked(&rdma_stat_rq_prod);
98316
98317 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
98318 /*
98319 @@ -386,7 +386,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
98320 return;
98321
98322 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
98323 - atomic_inc(&rdma_stat_sq_poll);
98324 + atomic_inc_unchecked(&rdma_stat_sq_poll);
98325 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
98326 if (wc.status != IB_WC_SUCCESS)
98327 /* Close the transport */
98328 @@ -404,7 +404,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
98329 }
98330
98331 if (ctxt)
98332 - atomic_inc(&rdma_stat_sq_prod);
98333 + atomic_inc_unchecked(&rdma_stat_sq_prod);
98334 }
98335
98336 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
98337 @@ -1262,7 +1262,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
98338 spin_lock_bh(&xprt->sc_lock);
98339 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
98340 spin_unlock_bh(&xprt->sc_lock);
98341 - atomic_inc(&rdma_stat_sq_starve);
98342 + atomic_inc_unchecked(&rdma_stat_sq_starve);
98343
98344 /* See if we can opportunistically reap SQ WR to make room */
98345 sq_cq_reap(xprt);
98346 diff --git a/net/sysctl_net.c b/net/sysctl_net.c
98347 index e7000be..e3b0ba7 100644
98348 --- a/net/sysctl_net.c
98349 +++ b/net/sysctl_net.c
98350 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ctl_table_header *head,
98351 kgid_t root_gid = make_kgid(net->user_ns, 0);
98352
98353 /* Allow network administrator to have same access as root. */
98354 - if (ns_capable(net->user_ns, CAP_NET_ADMIN) ||
98355 + if (ns_capable_nolog(net->user_ns, CAP_NET_ADMIN) ||
98356 uid_eq(root_uid, current_euid())) {
98357 int mode = (table->mode >> 6) & 7;
98358 return (mode << 6) | (mode << 3) | mode;
98359 diff --git a/net/tipc/link.c b/net/tipc/link.c
98360 index 0cc3d90..cc42021 100644
98361 --- a/net/tipc/link.c
98362 +++ b/net/tipc/link.c
98363 @@ -1165,7 +1165,7 @@ static int link_send_sections_long(struct tipc_port *sender,
98364 struct tipc_msg fragm_hdr;
98365 struct sk_buff *buf, *buf_chain, *prev;
98366 u32 fragm_crs, fragm_rest, hsz, sect_rest;
98367 - const unchar *sect_crs;
98368 + const unchar __user *sect_crs;
98369 int curr_sect;
98370 u32 fragm_no;
98371 int res = 0;
98372 @@ -1207,7 +1207,7 @@ again:
98373
98374 if (!sect_rest) {
98375 sect_rest = msg_sect[++curr_sect].iov_len;
98376 - sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
98377 + sect_crs = (const unchar __user *)msg_sect[curr_sect].iov_base;
98378 }
98379
98380 if (sect_rest < fragm_rest)
98381 diff --git a/net/tipc/msg.c b/net/tipc/msg.c
98382 index ced60e2..54eab6c 100644
98383 --- a/net/tipc/msg.c
98384 +++ b/net/tipc/msg.c
98385 @@ -93,7 +93,7 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
98386 skb_copy_to_linear_data(*buf, hdr, hsz);
98387 for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
98388 skb_copy_to_linear_data_offset(*buf, pos,
98389 - msg_sect[cnt].iov_base,
98390 + (const void __force_kernel *)msg_sect[cnt].iov_base,
98391 msg_sect[cnt].iov_len);
98392 pos += msg_sect[cnt].iov_len;
98393 }
98394 diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
98395 index d38bb45..4fd6ac6 100644
98396 --- a/net/tipc/subscr.c
98397 +++ b/net/tipc/subscr.c
98398 @@ -98,7 +98,7 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
98399 struct kvec msg_sect;
98400 int ret;
98401
98402 - msg_sect.iov_base = (void *)&sub->evt;
98403 + msg_sect.iov_base = (void __force_user *)&sub->evt;
98404 msg_sect.iov_len = sizeof(struct tipc_event);
98405
98406 sub->evt.event = htohl(event, sub->swap);
98407 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
98408 index 01625cc..d486b64 100644
98409 --- a/net/unix/af_unix.c
98410 +++ b/net/unix/af_unix.c
98411 @@ -784,6 +784,12 @@ static struct sock *unix_find_other(struct net *net,
98412 err = -ECONNREFUSED;
98413 if (!S_ISSOCK(inode->i_mode))
98414 goto put_fail;
98415 +
98416 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
98417 + err = -EACCES;
98418 + goto put_fail;
98419 + }
98420 +
98421 u = unix_find_socket_byinode(inode);
98422 if (!u)
98423 goto put_fail;
98424 @@ -804,6 +810,13 @@ static struct sock *unix_find_other(struct net *net,
98425 if (u) {
98426 struct dentry *dentry;
98427 dentry = unix_sk(u)->path.dentry;
98428 +
98429 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
98430 + err = -EPERM;
98431 + sock_put(u);
98432 + goto fail;
98433 + }
98434 +
98435 if (dentry)
98436 touch_atime(&unix_sk(u)->path);
98437 } else
98438 @@ -837,12 +850,18 @@ static int unix_mknod(const char *sun_path, umode_t mode, struct path *res)
98439 */
98440 err = security_path_mknod(&path, dentry, mode, 0);
98441 if (!err) {
98442 + if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
98443 + err = -EACCES;
98444 + goto out;
98445 + }
98446 err = vfs_mknod(path.dentry->d_inode, dentry, mode, 0);
98447 if (!err) {
98448 res->mnt = mntget(path.mnt);
98449 res->dentry = dget(dentry);
98450 + gr_handle_create(dentry, path.mnt);
98451 }
98452 }
98453 +out:
98454 done_path_create(&path, dentry);
98455 return err;
98456 }
98457 @@ -2328,9 +2347,13 @@ static int unix_seq_show(struct seq_file *seq, void *v)
98458 seq_puts(seq, "Num RefCount Protocol Flags Type St "
98459 "Inode Path\n");
98460 else {
98461 - struct sock *s = v;
98462 + struct sock *s = v, *peer;
98463 struct unix_sock *u = unix_sk(s);
98464 unix_state_lock(s);
98465 + peer = unix_peer(s);
98466 + unix_state_unlock(s);
98467 +
98468 + unix_state_double_lock(s, peer);
98469
98470 seq_printf(seq, "%pK: %08X %08X %08X %04X %02X %5lu",
98471 s,
98472 @@ -2357,8 +2380,10 @@ static int unix_seq_show(struct seq_file *seq, void *v)
98473 }
98474 for ( ; i < len; i++)
98475 seq_putc(seq, u->addr->name->sun_path[i]);
98476 - }
98477 - unix_state_unlock(s);
98478 + } else if (peer)
98479 + seq_printf(seq, " P%lu", sock_i_ino(peer));
98480 +
98481 + unix_state_double_unlock(s, peer);
98482 seq_putc(seq, '\n');
98483 }
98484
98485 diff --git a/net/unix/sysctl_net_unix.c b/net/unix/sysctl_net_unix.c
98486 index b3d5150..ff3a837 100644
98487 --- a/net/unix/sysctl_net_unix.c
98488 +++ b/net/unix/sysctl_net_unix.c
98489 @@ -28,7 +28,7 @@ static struct ctl_table unix_table[] = {
98490
98491 int __net_init unix_sysctl_register(struct net *net)
98492 {
98493 - struct ctl_table *table;
98494 + ctl_table_no_const *table;
98495
98496 table = kmemdup(unix_table, sizeof(unix_table), GFP_KERNEL);
98497 if (table == NULL)
98498 diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
98499 index c8717c1..08539f5 100644
98500 --- a/net/wireless/wext-core.c
98501 +++ b/net/wireless/wext-core.c
98502 @@ -748,8 +748,7 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
98503 */
98504
98505 /* Support for very large requests */
98506 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
98507 - (user_length > descr->max_tokens)) {
98508 + if (user_length > descr->max_tokens) {
98509 /* Allow userspace to GET more than max so
98510 * we can support any size GET requests.
98511 * There is still a limit : -ENOMEM.
98512 @@ -788,22 +787,6 @@ static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
98513 }
98514 }
98515
98516 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
98517 - /*
98518 - * If this is a GET, but not NOMAX, it means that the extra
98519 - * data is not bounded by userspace, but by max_tokens. Thus
98520 - * set the length to max_tokens. This matches the extra data
98521 - * allocation.
98522 - * The driver should fill it with the number of tokens it
98523 - * provided, and it may check iwp->length rather than having
98524 - * knowledge of max_tokens. If the driver doesn't change the
98525 - * iwp->length, this ioctl just copies back max_token tokens
98526 - * filled with zeroes. Hopefully the driver isn't claiming
98527 - * them to be valid data.
98528 - */
98529 - iwp->length = descr->max_tokens;
98530 - }
98531 -
98532 err = handler(dev, info, (union iwreq_data *) iwp, extra);
98533
98534 iwp->length += essid_compat;
98535 diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
98536 index 76e1873..4a12664 100644
98537 --- a/net/xfrm/xfrm_policy.c
98538 +++ b/net/xfrm/xfrm_policy.c
98539 @@ -332,7 +332,7 @@ static void xfrm_policy_kill(struct xfrm_policy *policy)
98540 {
98541 policy->walk.dead = 1;
98542
98543 - atomic_inc(&policy->genid);
98544 + atomic_inc_unchecked(&policy->genid);
98545
98546 if (del_timer(&policy->polq.hold_timer))
98547 xfrm_pol_put(policy);
98548 @@ -660,7 +660,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
98549 hlist_add_head(&policy->bydst, chain);
98550 xfrm_pol_hold(policy);
98551 net->xfrm.policy_count[dir]++;
98552 - atomic_inc(&flow_cache_genid);
98553 + atomic_inc_unchecked(&flow_cache_genid);
98554
98555 /* After previous checking, family can either be AF_INET or AF_INET6 */
98556 if (policy->family == AF_INET)
98557 @@ -1636,7 +1636,7 @@ free_dst:
98558 goto out;
98559 }
98560
98561 -static int inline
98562 +static inline int
98563 xfrm_dst_alloc_copy(void **target, const void *src, int size)
98564 {
98565 if (!*target) {
98566 @@ -1648,7 +1648,7 @@ xfrm_dst_alloc_copy(void **target, const void *src, int size)
98567 return 0;
98568 }
98569
98570 -static int inline
98571 +static inline int
98572 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
98573 {
98574 #ifdef CONFIG_XFRM_SUB_POLICY
98575 @@ -1660,7 +1660,7 @@ xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
98576 #endif
98577 }
98578
98579 -static int inline
98580 +static inline int
98581 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
98582 {
98583 #ifdef CONFIG_XFRM_SUB_POLICY
98584 @@ -1754,7 +1754,7 @@ xfrm_resolve_and_create_bundle(struct xfrm_policy **pols, int num_pols,
98585
98586 xdst->num_pols = num_pols;
98587 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
98588 - xdst->policy_genid = atomic_read(&pols[0]->genid);
98589 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
98590
98591 return xdst;
98592 }
98593 @@ -2575,11 +2575,12 @@ void xfrm_garbage_collect(struct net *net)
98594 }
98595 EXPORT_SYMBOL(xfrm_garbage_collect);
98596
98597 -static void xfrm_garbage_collect_deferred(struct net *net)
98598 +void xfrm_garbage_collect_deferred(struct net *net)
98599 {
98600 flow_cache_flush_deferred();
98601 __xfrm_garbage_collect(net);
98602 }
98603 +EXPORT_SYMBOL(xfrm_garbage_collect_deferred);
98604
98605 static void xfrm_init_pmtu(struct dst_entry *dst)
98606 {
98607 @@ -2629,7 +2630,7 @@ static int xfrm_bundle_ok(struct xfrm_dst *first)
98608 if (xdst->xfrm_genid != dst->xfrm->genid)
98609 return 0;
98610 if (xdst->num_pols > 0 &&
98611 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
98612 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
98613 return 0;
98614
98615 mtu = dst_mtu(dst->child);
98616 @@ -2717,8 +2718,6 @@ int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo)
98617 dst_ops->link_failure = xfrm_link_failure;
98618 if (likely(dst_ops->neigh_lookup == NULL))
98619 dst_ops->neigh_lookup = xfrm_neigh_lookup;
98620 - if (likely(afinfo->garbage_collect == NULL))
98621 - afinfo->garbage_collect = xfrm_garbage_collect_deferred;
98622 rcu_assign_pointer(xfrm_policy_afinfo[afinfo->family], afinfo);
98623 }
98624 spin_unlock(&xfrm_policy_afinfo_lock);
98625 @@ -2772,7 +2771,6 @@ int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo)
98626 dst_ops->check = NULL;
98627 dst_ops->negative_advice = NULL;
98628 dst_ops->link_failure = NULL;
98629 - afinfo->garbage_collect = NULL;
98630 }
98631 return err;
98632 }
98633 @@ -3155,7 +3153,7 @@ static int xfrm_policy_migrate(struct xfrm_policy *pol,
98634 sizeof(pol->xfrm_vec[i].saddr));
98635 pol->xfrm_vec[i].encap_family = mp->new_family;
98636 /* flush bundles */
98637 - atomic_inc(&pol->genid);
98638 + atomic_inc_unchecked(&pol->genid);
98639 }
98640 }
98641
98642 diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
98643 index b9c3f9e..4767526 100644
98644 --- a/net/xfrm/xfrm_state.c
98645 +++ b/net/xfrm/xfrm_state.c
98646 @@ -174,12 +174,14 @@ int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
98647
98648 if (unlikely(afinfo == NULL))
98649 return -EAFNOSUPPORT;
98650 - typemap = afinfo->type_map;
98651 + typemap = (const struct xfrm_type **)afinfo->type_map;
98652 spin_lock_bh(&xfrm_type_lock);
98653
98654 - if (likely(typemap[type->proto] == NULL))
98655 + if (likely(typemap[type->proto] == NULL)) {
98656 + pax_open_kernel();
98657 typemap[type->proto] = type;
98658 - else
98659 + pax_close_kernel();
98660 + } else
98661 err = -EEXIST;
98662 spin_unlock_bh(&xfrm_type_lock);
98663 xfrm_state_put_afinfo(afinfo);
98664 @@ -195,13 +197,16 @@ int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
98665
98666 if (unlikely(afinfo == NULL))
98667 return -EAFNOSUPPORT;
98668 - typemap = afinfo->type_map;
98669 + typemap = (const struct xfrm_type **)afinfo->type_map;
98670 spin_lock_bh(&xfrm_type_lock);
98671
98672 if (unlikely(typemap[type->proto] != type))
98673 err = -ENOENT;
98674 - else
98675 + else {
98676 + pax_open_kernel();
98677 typemap[type->proto] = NULL;
98678 + pax_close_kernel();
98679 + }
98680 spin_unlock_bh(&xfrm_type_lock);
98681 xfrm_state_put_afinfo(afinfo);
98682 return err;
98683 @@ -211,7 +216,6 @@ EXPORT_SYMBOL(xfrm_unregister_type);
98684 static const struct xfrm_type *xfrm_get_type(u8 proto, unsigned short family)
98685 {
98686 struct xfrm_state_afinfo *afinfo;
98687 - const struct xfrm_type **typemap;
98688 const struct xfrm_type *type;
98689 int modload_attempted = 0;
98690
98691 @@ -219,9 +223,8 @@ retry:
98692 afinfo = xfrm_state_get_afinfo(family);
98693 if (unlikely(afinfo == NULL))
98694 return NULL;
98695 - typemap = afinfo->type_map;
98696
98697 - type = typemap[proto];
98698 + type = afinfo->type_map[proto];
98699 if (unlikely(type && !try_module_get(type->owner)))
98700 type = NULL;
98701 if (!type && !modload_attempted) {
98702 @@ -255,7 +258,7 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
98703 return -EAFNOSUPPORT;
98704
98705 err = -EEXIST;
98706 - modemap = afinfo->mode_map;
98707 + modemap = (struct xfrm_mode **)afinfo->mode_map;
98708 spin_lock_bh(&xfrm_mode_lock);
98709 if (modemap[mode->encap])
98710 goto out;
98711 @@ -264,8 +267,10 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
98712 if (!try_module_get(afinfo->owner))
98713 goto out;
98714
98715 - mode->afinfo = afinfo;
98716 + pax_open_kernel();
98717 + *(const void **)&mode->afinfo = afinfo;
98718 modemap[mode->encap] = mode;
98719 + pax_close_kernel();
98720 err = 0;
98721
98722 out:
98723 @@ -289,10 +294,12 @@ int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
98724 return -EAFNOSUPPORT;
98725
98726 err = -ENOENT;
98727 - modemap = afinfo->mode_map;
98728 + modemap = (struct xfrm_mode **)afinfo->mode_map;
98729 spin_lock_bh(&xfrm_mode_lock);
98730 if (likely(modemap[mode->encap] == mode)) {
98731 + pax_open_kernel();
98732 modemap[mode->encap] = NULL;
98733 + pax_close_kernel();
98734 module_put(mode->afinfo->owner);
98735 err = 0;
98736 }
98737 @@ -1486,10 +1493,10 @@ EXPORT_SYMBOL(xfrm_find_acq_byseq);
98738 u32 xfrm_get_acqseq(void)
98739 {
98740 u32 res;
98741 - static atomic_t acqseq;
98742 + static atomic_unchecked_t acqseq;
98743
98744 do {
98745 - res = atomic_inc_return(&acqseq);
98746 + res = atomic_inc_return_unchecked(&acqseq);
98747 } while (!res);
98748
98749 return res;
98750 diff --git a/net/xfrm/xfrm_sysctl.c b/net/xfrm/xfrm_sysctl.c
98751 index 05a6e3d..6716ec9 100644
98752 --- a/net/xfrm/xfrm_sysctl.c
98753 +++ b/net/xfrm/xfrm_sysctl.c
98754 @@ -42,7 +42,7 @@ static struct ctl_table xfrm_table[] = {
98755
98756 int __net_init xfrm_sysctl_init(struct net *net)
98757 {
98758 - struct ctl_table *table;
98759 + ctl_table_no_const *table;
98760
98761 __xfrm_sysctl_init(net);
98762
98763 diff --git a/scripts/Makefile.build b/scripts/Makefile.build
98764 index d5d859c..781cbcb 100644
98765 --- a/scripts/Makefile.build
98766 +++ b/scripts/Makefile.build
98767 @@ -111,7 +111,7 @@ endif
98768 endif
98769
98770 # Do not include host rules unless needed
98771 -ifneq ($(hostprogs-y)$(hostprogs-m),)
98772 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m)$(hostcxxlibs-y)$(hostcxxlibs-m),)
98773 include scripts/Makefile.host
98774 endif
98775
98776 diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
98777 index 686cb0d..9d653bf 100644
98778 --- a/scripts/Makefile.clean
98779 +++ b/scripts/Makefile.clean
98780 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subdir-ymn))
98781 __clean-files := $(extra-y) $(always) \
98782 $(targets) $(clean-files) \
98783 $(host-progs) \
98784 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
98785 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
98786 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
98787
98788 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
98789
98790 diff --git a/scripts/Makefile.host b/scripts/Makefile.host
98791 index 1ac414f..38575f7 100644
98792 --- a/scripts/Makefile.host
98793 +++ b/scripts/Makefile.host
98794 @@ -31,6 +31,8 @@
98795 # Note: Shared libraries consisting of C++ files are not supported
98796
98797 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
98798 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
98799 +__hostcxxlibs := $(sort $(hostcxxlibs-y) $(hostcxxlibs-m))
98800
98801 # C code
98802 # Executables compiled from a single .c file
98803 @@ -54,11 +56,15 @@ host-cxxobjs := $(sort $(foreach m,$(host-cxxmulti),$($(m)-cxxobjs)))
98804 # Shared libaries (only .c supported)
98805 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
98806 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
98807 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
98808 +host-cxxshlib := $(sort $(filter %.so, $(__hostcxxlibs)))
98809 # Remove .so files from "xxx-objs"
98810 host-cobjs := $(filter-out %.so,$(host-cobjs))
98811 +host-cxxobjs := $(filter-out %.so,$(host-cxxobjs))
98812
98813 -#Object (.o) files used by the shared libaries
98814 +# Object (.o) files used by the shared libaries
98815 host-cshobjs := $(sort $(foreach m,$(host-cshlib),$($(m:.so=-objs))))
98816 +host-cxxshobjs := $(sort $(foreach m,$(host-cxxshlib),$($(m:.so=-objs))))
98817
98818 # output directory for programs/.o files
98819 # hostprogs-y := tools/build may have been specified. Retrieve directory
98820 @@ -82,7 +88,9 @@ host-cobjs := $(addprefix $(obj)/,$(host-cobjs))
98821 host-cxxmulti := $(addprefix $(obj)/,$(host-cxxmulti))
98822 host-cxxobjs := $(addprefix $(obj)/,$(host-cxxobjs))
98823 host-cshlib := $(addprefix $(obj)/,$(host-cshlib))
98824 +host-cxxshlib := $(addprefix $(obj)/,$(host-cxxshlib))
98825 host-cshobjs := $(addprefix $(obj)/,$(host-cshobjs))
98826 +host-cxxshobjs := $(addprefix $(obj)/,$(host-cxxshobjs))
98827 host-objdirs := $(addprefix $(obj)/,$(host-objdirs))
98828
98829 obj-dirs += $(host-objdirs)
98830 @@ -156,6 +164,13 @@ quiet_cmd_host-cshobjs = HOSTCC -fPIC $@
98831 $(host-cshobjs): $(obj)/%.o: $(src)/%.c FORCE
98832 $(call if_changed_dep,host-cshobjs)
98833
98834 +# Compile .c file, create position independent .o file
98835 +# host-cxxshobjs -> .o
98836 +quiet_cmd_host-cxxshobjs = HOSTCXX -fPIC $@
98837 + cmd_host-cxxshobjs = $(HOSTCXX) $(hostcxx_flags) -fPIC -c -o $@ $<
98838 +$(host-cxxshobjs): $(obj)/%.o: $(src)/%.c FORCE
98839 + $(call if_changed_dep,host-cxxshobjs)
98840 +
98841 # Link a shared library, based on position independent .o files
98842 # *.o -> .so shared library (host-cshlib)
98843 quiet_cmd_host-cshlib = HOSTLLD -shared $@
98844 @@ -165,6 +180,15 @@ quiet_cmd_host-cshlib = HOSTLLD -shared $@
98845 $(host-cshlib): $(obj)/%: $(host-cshobjs) FORCE
98846 $(call if_changed,host-cshlib)
98847
98848 +# Link a shared library, based on position independent .o files
98849 +# *.o -> .so shared library (host-cxxshlib)
98850 +quiet_cmd_host-cxxshlib = HOSTLLD -shared $@
98851 + cmd_host-cxxshlib = $(HOSTCXX) $(HOSTLDFLAGS) -shared -o $@ \
98852 + $(addprefix $(obj)/,$($(@F:.so=-objs))) \
98853 + $(HOST_LOADLIBES) $(HOSTLOADLIBES_$(@F))
98854 +$(host-cxxshlib): $(obj)/%: $(host-cxxshobjs) FORCE
98855 + $(call if_changed,host-cxxshlib)
98856 +
98857 targets += $(host-csingle) $(host-cmulti) $(host-cobjs)\
98858 - $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs)
98859 + $(host-cxxmulti) $(host-cxxobjs) $(host-cshlib) $(host-cshobjs) $(host-cxxshlib) $(host-cxxshobjs)
98860
98861 diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c
98862 index 078fe1d..fbdb363 100644
98863 --- a/scripts/basic/fixdep.c
98864 +++ b/scripts/basic/fixdep.c
98865 @@ -161,7 +161,7 @@ static unsigned int strhash(const char *str, unsigned int sz)
98866 /*
98867 * Lookup a value in the configuration string.
98868 */
98869 -static int is_defined_config(const char *name, int len, unsigned int hash)
98870 +static int is_defined_config(const char *name, unsigned int len, unsigned int hash)
98871 {
98872 struct item *aux;
98873
98874 @@ -211,10 +211,10 @@ static void clear_config(void)
98875 /*
98876 * Record the use of a CONFIG_* word.
98877 */
98878 -static void use_config(const char *m, int slen)
98879 +static void use_config(const char *m, unsigned int slen)
98880 {
98881 unsigned int hash = strhash(m, slen);
98882 - int c, i;
98883 + unsigned int c, i;
98884
98885 if (is_defined_config(m, slen, hash))
98886 return;
98887 @@ -235,9 +235,9 @@ static void use_config(const char *m, int slen)
98888
98889 static void parse_config_file(const char *map, size_t len)
98890 {
98891 - const int *end = (const int *) (map + len);
98892 + const unsigned int *end = (const unsigned int *) (map + len);
98893 /* start at +1, so that p can never be < map */
98894 - const int *m = (const int *) map + 1;
98895 + const unsigned int *m = (const unsigned int *) map + 1;
98896 const char *p, *q;
98897
98898 for (; m < end; m++) {
98899 @@ -435,7 +435,7 @@ static void print_deps(void)
98900 static void traps(void)
98901 {
98902 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
98903 - int *p = (int *)test;
98904 + unsigned int *p = (unsigned int *)test;
98905
98906 if (*p != INT_CONF) {
98907 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianness? %#x\n",
98908 diff --git a/scripts/gcc-plugin.sh b/scripts/gcc-plugin.sh
98909 new file mode 100644
98910 index 0000000..5e0222d
98911 --- /dev/null
98912 +++ b/scripts/gcc-plugin.sh
98913 @@ -0,0 +1,17 @@
98914 +#!/bin/bash
98915 +plugincc=`$1 -E -shared - -o /dev/null -I\`$3 -print-file-name=plugin\`/include 2>&1 <<EOF
98916 +#include "gcc-plugin.h"
98917 +#include "tree.h"
98918 +#include "tm.h"
98919 +#include "rtl.h"
98920 +#ifdef ENABLE_BUILD_WITH_CXX
98921 +#warning $2
98922 +#else
98923 +#warning $1
98924 +#endif
98925 +EOF`
98926 +if [ $? -eq 0 ]
98927 +then
98928 + [[ "$plugincc" =~ "$1" ]] && echo "$1"
98929 + [[ "$plugincc" =~ "$2" ]] && echo "$2"
98930 +fi
98931 diff --git a/scripts/headers_install.sh b/scripts/headers_install.sh
98932 index 5de5660..d3deb89 100644
98933 --- a/scripts/headers_install.sh
98934 +++ b/scripts/headers_install.sh
98935 @@ -32,6 +32,7 @@ do
98936 FILE="$(basename "$i")"
98937 sed -r \
98938 -e 's/([ \t(])(__user|__force|__iomem)[ \t]/\1/g' \
98939 + -e 's/__intentional_overflow\([- \t,0-9]*\)//g' \
98940 -e 's/__attribute_const__([ \t]|$)/\1/g' \
98941 -e 's@^#include <linux/compiler.h>@@' \
98942 -e 's/(^|[^a-zA-Z0-9])__packed([^a-zA-Z0-9_]|$)/\1__attribute__((packed))\2/g' \
98943 diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
98944 index 32b10f5..64eeb30 100644
98945 --- a/scripts/link-vmlinux.sh
98946 +++ b/scripts/link-vmlinux.sh
98947 @@ -82,7 +82,9 @@ kallsyms()
98948 kallsymopt="${kallsymopt} --all-symbols"
98949 fi
98950
98951 - kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
98952 + if [ -z "${CONFIG_X86_32}" ] || [ -z "${CONFIG_PAX_KERNEXEC}" ]; then
98953 + kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
98954 + fi
98955
98956 local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
98957 ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
98958 @@ -160,7 +162,7 @@ else
98959 fi;
98960
98961 # final build of init/
98962 -${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
98963 +${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init GCC_PLUGINS_CFLAGS="${GCC_PLUGINS_CFLAGS}" GCC_PLUGINS_AFLAGS="${GCC_PLUGINS_AFLAGS}"
98964
98965 kallsymso=""
98966 kallsyms_vmlinux=""
98967 diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
98968 index 2370863..212fbca 100644
98969 --- a/scripts/mod/file2alias.c
98970 +++ b/scripts/mod/file2alias.c
98971 @@ -142,7 +142,7 @@ static void device_id_check(const char *modname, const char *device_id,
98972 unsigned long size, unsigned long id_size,
98973 void *symval)
98974 {
98975 - int i;
98976 + unsigned int i;
98977
98978 if (size % id_size || size < id_size) {
98979 fatal("%s: sizeof(struct %s_device_id)=%lu is not a modulo "
98980 @@ -170,7 +170,7 @@ static void device_id_check(const char *modname, const char *device_id,
98981 /* USB is special because the bcdDevice can be matched against a numeric range */
98982 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipNinN" */
98983 static void do_usb_entry(void *symval,
98984 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
98985 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
98986 unsigned char range_lo, unsigned char range_hi,
98987 unsigned char max, struct module *mod)
98988 {
98989 @@ -280,7 +280,7 @@ static void do_usb_entry_multi(void *symval, struct module *mod)
98990 {
98991 unsigned int devlo, devhi;
98992 unsigned char chi, clo, max;
98993 - int ndigits;
98994 + unsigned int ndigits;
98995
98996 DEF_FIELD(symval, usb_device_id, match_flags);
98997 DEF_FIELD(symval, usb_device_id, idVendor);
98998 @@ -533,7 +533,7 @@ static void do_pnp_device_entry(void *symval, unsigned long size,
98999 for (i = 0; i < count; i++) {
99000 DEF_FIELD_ADDR(symval + i*id_size, pnp_device_id, id);
99001 char acpi_id[sizeof(*id)];
99002 - int j;
99003 + unsigned int j;
99004
99005 buf_printf(&mod->dev_table_buf,
99006 "MODULE_ALIAS(\"pnp:d%s*\");\n", *id);
99007 @@ -562,7 +562,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
99008
99009 for (j = 0; j < PNP_MAX_DEVICES; j++) {
99010 const char *id = (char *)(*devs)[j].id;
99011 - int i2, j2;
99012 + unsigned int i2, j2;
99013 int dup = 0;
99014
99015 if (!id[0])
99016 @@ -588,7 +588,7 @@ static void do_pnp_card_entries(void *symval, unsigned long size,
99017 /* add an individual alias for every device entry */
99018 if (!dup) {
99019 char acpi_id[PNP_ID_LEN];
99020 - int k;
99021 + unsigned int k;
99022
99023 buf_printf(&mod->dev_table_buf,
99024 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
99025 @@ -940,7 +940,7 @@ static void dmi_ascii_filter(char *d, const char *s)
99026 static int do_dmi_entry(const char *filename, void *symval,
99027 char *alias)
99028 {
99029 - int i, j;
99030 + unsigned int i, j;
99031 DEF_FIELD_ADDR(symval, dmi_system_id, matches);
99032 sprintf(alias, "dmi*");
99033
99034 diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
99035 index 8247979..815ec76 100644
99036 --- a/scripts/mod/modpost.c
99037 +++ b/scripts/mod/modpost.c
99038 @@ -931,6 +931,7 @@ enum mismatch {
99039 ANY_INIT_TO_ANY_EXIT,
99040 ANY_EXIT_TO_ANY_INIT,
99041 EXPORT_TO_INIT_EXIT,
99042 + DATA_TO_TEXT
99043 };
99044
99045 struct sectioncheck {
99046 @@ -1017,6 +1018,12 @@ const struct sectioncheck sectioncheck[] = {
99047 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
99048 .mismatch = EXPORT_TO_INIT_EXIT,
99049 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
99050 +},
99051 +/* Do not reference code from writable data */
99052 +{
99053 + .fromsec = { DATA_SECTIONS, NULL },
99054 + .tosec = { TEXT_SECTIONS, NULL },
99055 + .mismatch = DATA_TO_TEXT
99056 }
99057 };
99058
99059 @@ -1137,10 +1144,10 @@ static Elf_Sym *find_elf_symbol(struct elf_info *elf, Elf64_Sword addr,
99060 continue;
99061 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
99062 continue;
99063 - if (sym->st_value == addr)
99064 - return sym;
99065 /* Find a symbol nearby - addr are maybe negative */
99066 d = sym->st_value - addr;
99067 + if (d == 0)
99068 + return sym;
99069 if (d < 0)
99070 d = addr - sym->st_value;
99071 if (d < distance) {
99072 @@ -1418,6 +1425,14 @@ static void report_sec_mismatch(const char *modname,
99073 tosym, prl_to, prl_to, tosym);
99074 free(prl_to);
99075 break;
99076 + case DATA_TO_TEXT:
99077 +#if 0
99078 + fprintf(stderr,
99079 + "The %s %s:%s references\n"
99080 + "the %s %s:%s%s\n",
99081 + from, fromsec, fromsym, to, tosec, tosym, to_p);
99082 +#endif
99083 + break;
99084 }
99085 fprintf(stderr, "\n");
99086 }
99087 @@ -1652,7 +1667,7 @@ static void section_rel(const char *modname, struct elf_info *elf,
99088 static void check_sec_ref(struct module *mod, const char *modname,
99089 struct elf_info *elf)
99090 {
99091 - int i;
99092 + unsigned int i;
99093 Elf_Shdr *sechdrs = elf->sechdrs;
99094
99095 /* Walk through all sections */
99096 @@ -1771,7 +1786,7 @@ void __attribute__((format(printf, 2, 3))) buf_printf(struct buffer *buf,
99097 va_end(ap);
99098 }
99099
99100 -void buf_write(struct buffer *buf, const char *s, int len)
99101 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
99102 {
99103 if (buf->size - buf->pos < len) {
99104 buf->size += len + SZ;
99105 @@ -1990,7 +2005,7 @@ static void write_if_changed(struct buffer *b, const char *fname)
99106 if (fstat(fileno(file), &st) < 0)
99107 goto close_write;
99108
99109 - if (st.st_size != b->pos)
99110 + if (st.st_size != (off_t)b->pos)
99111 goto close_write;
99112
99113 tmp = NOFAIL(malloc(b->pos));
99114 diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h
99115 index 51207e4..f7d603d 100644
99116 --- a/scripts/mod/modpost.h
99117 +++ b/scripts/mod/modpost.h
99118 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *expr);
99119
99120 struct buffer {
99121 char *p;
99122 - int pos;
99123 - int size;
99124 + unsigned int pos;
99125 + unsigned int size;
99126 };
99127
99128 void __attribute__((format(printf, 2, 3)))
99129 buf_printf(struct buffer *buf, const char *fmt, ...);
99130
99131 void
99132 -buf_write(struct buffer *buf, const char *s, int len);
99133 +buf_write(struct buffer *buf, const char *s, unsigned int len);
99134
99135 struct module {
99136 struct module *next;
99137 diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c
99138 index 9dfcd6d..099068e 100644
99139 --- a/scripts/mod/sumversion.c
99140 +++ b/scripts/mod/sumversion.c
99141 @@ -470,7 +470,7 @@ static void write_version(const char *filename, const char *sum,
99142 goto out;
99143 }
99144
99145 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
99146 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
99147 warn("writing sum in %s failed: %s\n",
99148 filename, strerror(errno));
99149 goto out;
99150 diff --git a/scripts/module-common.lds b/scripts/module-common.lds
99151 index 0865b3e..7235dd4 100644
99152 --- a/scripts/module-common.lds
99153 +++ b/scripts/module-common.lds
99154 @@ -6,6 +6,10 @@
99155 SECTIONS {
99156 /DISCARD/ : { *(.discard) }
99157
99158 + .rodata : {
99159 + *(.rodata) *(.rodata.*)
99160 + *(.data..read_only)
99161 + }
99162 __ksymtab : { *(SORT(___ksymtab+*)) }
99163 __ksymtab_gpl : { *(SORT(___ksymtab_gpl+*)) }
99164 __ksymtab_unused : { *(SORT(___ksymtab_unused+*)) }
99165 diff --git a/scripts/package/builddeb b/scripts/package/builddeb
99166 index 90e521f..e9eaf8f 100644
99167 --- a/scripts/package/builddeb
99168 +++ b/scripts/package/builddeb
99169 @@ -281,6 +281,7 @@ fi
99170 (cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
99171 (cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
99172 (cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
99173 +(cd $objtree; find tools/gcc -name \*.so >> "$objtree/debian/hdrobjfiles")
99174 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
99175 mkdir -p "$destdir"
99176 (cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
99177 diff --git a/scripts/pnmtologo.c b/scripts/pnmtologo.c
99178 index 68bb4ef..2f419e1 100644
99179 --- a/scripts/pnmtologo.c
99180 +++ b/scripts/pnmtologo.c
99181 @@ -244,14 +244,14 @@ static void write_header(void)
99182 fprintf(out, " * Linux logo %s\n", logoname);
99183 fputs(" */\n\n", out);
99184 fputs("#include <linux/linux_logo.h>\n\n", out);
99185 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
99186 + fprintf(out, "static unsigned char %s_data[] = {\n",
99187 logoname);
99188 }
99189
99190 static void write_footer(void)
99191 {
99192 fputs("\n};\n\n", out);
99193 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
99194 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
99195 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
99196 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
99197 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
99198 @@ -381,7 +381,7 @@ static void write_logo_clut224(void)
99199 fputs("\n};\n\n", out);
99200
99201 /* write logo clut */
99202 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
99203 + fprintf(out, "static unsigned char %s_clut[] = {\n",
99204 logoname);
99205 write_hex_cnt = 0;
99206 for (i = 0; i < logo_clutsize; i++) {
99207 diff --git a/scripts/sortextable.h b/scripts/sortextable.h
99208 index f5eb43d..1814de8 100644
99209 --- a/scripts/sortextable.h
99210 +++ b/scripts/sortextable.h
99211 @@ -106,9 +106,9 @@ do_func(Elf_Ehdr *ehdr, char const *const fname, table_sort_t custom_sort)
99212 const char *secstrtab;
99213 const char *strtab;
99214 char *extab_image;
99215 - int extab_index = 0;
99216 - int i;
99217 - int idx;
99218 + unsigned int extab_index = 0;
99219 + unsigned int i;
99220 + unsigned int idx;
99221
99222 shdr = (Elf_Shdr *)((char *)ehdr + _r(&ehdr->e_shoff));
99223 shstrtab_sec = shdr + r2(&ehdr->e_shstrndx);
99224 diff --git a/security/Kconfig b/security/Kconfig
99225 index e9c6ac7..8433964 100644
99226 --- a/security/Kconfig
99227 +++ b/security/Kconfig
99228 @@ -4,6 +4,959 @@
99229
99230 menu "Security options"
99231
99232 +menu "Grsecurity"
99233 +
99234 + config ARCH_TRACK_EXEC_LIMIT
99235 + bool
99236 +
99237 + config PAX_KERNEXEC_PLUGIN
99238 + bool
99239 +
99240 + config PAX_PER_CPU_PGD
99241 + bool
99242 +
99243 + config TASK_SIZE_MAX_SHIFT
99244 + int
99245 + depends on X86_64
99246 + default 47 if !PAX_PER_CPU_PGD
99247 + default 42 if PAX_PER_CPU_PGD
99248 +
99249 + config PAX_ENABLE_PAE
99250 + bool
99251 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
99252 +
99253 + config PAX_USERCOPY_SLABS
99254 + bool
99255 +
99256 +config GRKERNSEC
99257 + bool "Grsecurity"
99258 + select CRYPTO
99259 + select CRYPTO_SHA256
99260 + select PROC_FS
99261 + select STOP_MACHINE
99262 + select TTY
99263 + select DEBUG_LIST
99264 + help
99265 + If you say Y here, you will be able to configure many features
99266 + that will enhance the security of your system. It is highly
99267 + recommended that you say Y here and read through the help
99268 + for each option so that you fully understand the features and
99269 + can evaluate their usefulness for your machine.
99270 +
99271 +choice
99272 + prompt "Configuration Method"
99273 + depends on GRKERNSEC
99274 + default GRKERNSEC_CONFIG_CUSTOM
99275 + help
99276 +
99277 +config GRKERNSEC_CONFIG_AUTO
99278 + bool "Automatic"
99279 + help
99280 + If you choose this configuration method, you'll be able to answer a small
99281 + number of simple questions about how you plan to use this kernel.
99282 + The settings of grsecurity and PaX will be automatically configured for
99283 + the highest commonly-used settings within the provided constraints.
99284 +
99285 + If you require additional configuration, custom changes can still be made
99286 + from the "custom configuration" menu.
99287 +
99288 +config GRKERNSEC_CONFIG_CUSTOM
99289 + bool "Custom"
99290 + help
99291 + If you choose this configuration method, you'll be able to configure all
99292 + grsecurity and PaX settings manually. Via this method, no options are
99293 + automatically enabled.
99294 +
99295 +endchoice
99296 +
99297 +choice
99298 + prompt "Usage Type"
99299 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
99300 + default GRKERNSEC_CONFIG_SERVER
99301 + help
99302 +
99303 +config GRKERNSEC_CONFIG_SERVER
99304 + bool "Server"
99305 + help
99306 + Choose this option if you plan to use this kernel on a server.
99307 +
99308 +config GRKERNSEC_CONFIG_DESKTOP
99309 + bool "Desktop"
99310 + help
99311 + Choose this option if you plan to use this kernel on a desktop.
99312 +
99313 +endchoice
99314 +
99315 +choice
99316 + prompt "Virtualization Type"
99317 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO)
99318 + default GRKERNSEC_CONFIG_VIRT_NONE
99319 + help
99320 +
99321 +config GRKERNSEC_CONFIG_VIRT_NONE
99322 + bool "None"
99323 + help
99324 + Choose this option if this kernel will be run on bare metal.
99325 +
99326 +config GRKERNSEC_CONFIG_VIRT_GUEST
99327 + bool "Guest"
99328 + help
99329 + Choose this option if this kernel will be run as a VM guest.
99330 +
99331 +config GRKERNSEC_CONFIG_VIRT_HOST
99332 + bool "Host"
99333 + help
99334 + Choose this option if this kernel will be run as a VM host.
99335 +
99336 +endchoice
99337 +
99338 +choice
99339 + prompt "Virtualization Hardware"
99340 + depends on (GRKERNSEC && X86 && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
99341 + help
99342 +
99343 +config GRKERNSEC_CONFIG_VIRT_EPT
99344 + bool "EPT/RVI Processor Support"
99345 + depends on X86
99346 + help
99347 + Choose this option if your CPU supports the EPT or RVI features of 2nd-gen
99348 + hardware virtualization. This allows for additional kernel hardening protections
99349 + to operate without additional performance impact.
99350 +
99351 + To see if your Intel processor supports EPT, see:
99352 + http://ark.intel.com/Products/VirtualizationTechnology
99353 + (Most Core i3/5/7 support EPT)
99354 +
99355 + To see if your AMD processor supports RVI, see:
99356 + http://support.amd.com/us/kbarticles/Pages/GPU120AMDRVICPUsHyperVWin8.aspx
99357 +
99358 +config GRKERNSEC_CONFIG_VIRT_SOFT
99359 + bool "First-gen/No Hardware Virtualization"
99360 + help
99361 + Choose this option if you use an Atom/Pentium/Core 2 processor that either doesn't
99362 + support hardware virtualization or doesn't support the EPT/RVI extensions.
99363 +
99364 +endchoice
99365 +
99366 +choice
99367 + prompt "Virtualization Software"
99368 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_GUEST || GRKERNSEC_CONFIG_VIRT_HOST))
99369 + help
99370 +
99371 +config GRKERNSEC_CONFIG_VIRT_XEN
99372 + bool "Xen"
99373 + help
99374 + Choose this option if this kernel is running as a Xen guest or host.
99375 +
99376 +config GRKERNSEC_CONFIG_VIRT_VMWARE
99377 + bool "VMWare"
99378 + help
99379 + Choose this option if this kernel is running as a VMWare guest or host.
99380 +
99381 +config GRKERNSEC_CONFIG_VIRT_KVM
99382 + bool "KVM"
99383 + help
99384 + Choose this option if this kernel is running as a KVM guest or host.
99385 +
99386 +config GRKERNSEC_CONFIG_VIRT_VIRTUALBOX
99387 + bool "VirtualBox"
99388 + help
99389 + Choose this option if this kernel is running as a VirtualBox guest or host.
99390 +
99391 +endchoice
99392 +
99393 +choice
99394 + prompt "Required Priorities"
99395 + depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
99396 + default GRKERNSEC_CONFIG_PRIORITY_PERF
99397 + help
99398 +
99399 +config GRKERNSEC_CONFIG_PRIORITY_PERF
99400 + bool "Performance"
99401 + help
99402 + Choose this option if performance is of highest priority for this deployment
99403 + of grsecurity. Features like UDEREF on a 64bit kernel, kernel stack clearing,
99404 + clearing of structures intended for userland, and freed memory sanitizing will
99405 + be disabled.
99406 +
99407 +config GRKERNSEC_CONFIG_PRIORITY_SECURITY
99408 + bool "Security"
99409 + help
99410 + Choose this option if security is of highest priority for this deployment of
99411 + grsecurity. UDEREF, kernel stack clearing, clearing of structures intended
99412 + for userland, and freed memory sanitizing will be enabled for this kernel.
99413 + In a worst-case scenario, these features can introduce a 20% performance hit
99414 + (UDEREF on x64 contributing half of this hit).
99415 +
99416 +endchoice
99417 +
99418 +menu "Default Special Groups"
99419 +depends on (GRKERNSEC && GRKERNSEC_CONFIG_AUTO)
99420 +
99421 +config GRKERNSEC_PROC_GID
99422 + int "GID exempted from /proc restrictions"
99423 + default 1001
99424 + help
99425 + Setting this GID determines which group will be exempted from
99426 + grsecurity's /proc restrictions, allowing users of the specified
99427 + group to view network statistics and the existence of other users'
99428 + processes on the system. This GID may also be chosen at boot time
99429 + via "grsec_proc_gid=" on the kernel commandline.
99430 +
99431 +config GRKERNSEC_TPE_UNTRUSTED_GID
99432 + int "GID for TPE-untrusted users"
99433 + depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
99434 + default 1005
99435 + help
99436 + Setting this GID determines which group untrusted users should
99437 + be added to. These users will be placed under grsecurity's Trusted Path
99438 + Execution mechanism, preventing them from executing their own binaries.
99439 + The users will only be able to execute binaries in directories owned and
99440 + writable only by the root user. If the sysctl option is enabled, a sysctl
99441 + option with name "tpe_gid" is created.
99442 +
99443 +config GRKERNSEC_TPE_TRUSTED_GID
99444 + int "GID for TPE-trusted users"
99445 + depends on GRKERNSEC_CONFIG_SERVER && GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
99446 + default 1005
99447 + help
99448 + Setting this GID determines what group TPE restrictions will be
99449 + *disabled* for. If the sysctl option is enabled, a sysctl option
99450 + with name "tpe_gid" is created.
99451 +
99452 +config GRKERNSEC_SYMLINKOWN_GID
99453 + int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
99454 + depends on GRKERNSEC_CONFIG_SERVER
99455 + default 1006
99456 + help
99457 + Setting this GID determines what group kernel-enforced
99458 + SymlinksIfOwnerMatch will be enabled for. If the sysctl option
99459 + is enabled, a sysctl option with name "symlinkown_gid" is created.
99460 +
99461 +
99462 +endmenu
99463 +
99464 +menu "Customize Configuration"
99465 +depends on GRKERNSEC
99466 +
99467 +menu "PaX"
99468 +
99469 +config PAX
99470 + bool "Enable various PaX features"
99471 + default y if GRKERNSEC_CONFIG_AUTO
99472 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
99473 + help
99474 + This allows you to enable various PaX features. PaX adds
99475 + intrusion prevention mechanisms to the kernel that reduce
99476 + the risks posed by exploitable memory corruption bugs.
99477 +
99478 +menu "PaX Control"
99479 + depends on PAX
99480 +
99481 +config PAX_SOFTMODE
99482 + bool 'Support soft mode'
99483 + help
99484 + Enabling this option will allow you to run PaX in soft mode, that
99485 + is, PaX features will not be enforced by default, only on executables
99486 + marked explicitly. You must also enable PT_PAX_FLAGS or XATTR_PAX_FLAGS
99487 + support as they are the only way to mark executables for soft mode use.
99488 +
99489 + Soft mode can be activated by using the "pax_softmode=1" kernel command
99490 + line option on boot. Furthermore you can control various PaX features
99491 + at runtime via the entries in /proc/sys/kernel/pax.
99492 +
99493 +config PAX_EI_PAX
99494 + bool 'Use legacy ELF header marking'
99495 + default y if GRKERNSEC_CONFIG_AUTO
99496 + help
99497 + Enabling this option will allow you to control PaX features on
99498 + a per executable basis via the 'chpax' utility available at
99499 + http://pax.grsecurity.net/. The control flags will be read from
99500 + an otherwise reserved part of the ELF header. This marking has
99501 + numerous drawbacks (no support for soft-mode, toolchain does not
99502 + know about the non-standard use of the ELF header) therefore it
99503 + has been deprecated in favour of PT_PAX_FLAGS and XATTR_PAX_FLAGS
99504 + support.
99505 +
99506 + Note that if you enable PT_PAX_FLAGS or XATTR_PAX_FLAGS marking
99507 + support as well, they will override the legacy EI_PAX marks.
99508 +
99509 + If you enable none of the marking options then all applications
99510 + will run with PaX enabled on them by default.
99511 +
99512 +config PAX_PT_PAX_FLAGS
99513 + bool 'Use ELF program header marking'
99514 + default y if GRKERNSEC_CONFIG_AUTO
99515 + help
99516 + Enabling this option will allow you to control PaX features on
99517 + a per executable basis via the 'paxctl' utility available at
99518 + http://pax.grsecurity.net/. The control flags will be read from
99519 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
99520 + has the benefits of supporting both soft mode and being fully
99521 + integrated into the toolchain (the binutils patch is available
99522 + from http://pax.grsecurity.net).
99523 +
99524 + Note that if you enable the legacy EI_PAX marking support as well,
99525 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
99526 +
99527 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
99528 + must make sure that the marks are the same if a binary has both marks.
99529 +
99530 + If you enable none of the marking options then all applications
99531 + will run with PaX enabled on them by default.
99532 +
99533 +config PAX_XATTR_PAX_FLAGS
99534 + bool 'Use filesystem extended attributes marking'
99535 + default y if GRKERNSEC_CONFIG_AUTO
99536 + select CIFS_XATTR if CIFS
99537 + select EXT2_FS_XATTR if EXT2_FS
99538 + select EXT3_FS_XATTR if EXT3_FS
99539 + select EXT4_FS_XATTR if EXT4_FS
99540 + select JFFS2_FS_XATTR if JFFS2_FS
99541 + select REISERFS_FS_XATTR if REISERFS_FS
99542 + select SQUASHFS_XATTR if SQUASHFS
99543 + select TMPFS_XATTR if TMPFS
99544 + select UBIFS_FS_XATTR if UBIFS_FS
99545 + help
99546 + Enabling this option will allow you to control PaX features on
99547 + a per executable basis via the 'setfattr' utility. The control
99548 + flags will be read from the user.pax.flags extended attribute of
99549 + the file. This marking has the benefit of supporting binary-only
99550 + applications that self-check themselves (e.g., skype) and would
99551 + not tolerate chpax/paxctl changes. The main drawback is that
99552 + extended attributes are not supported by some filesystems (e.g.,
99553 + isofs, udf, vfat) so copying files through such filesystems will
99554 + lose the extended attributes and these PaX markings.
99555 +
99556 + Note that if you enable the legacy EI_PAX marking support as well,
99557 + the EI_PAX marks will be overridden by the XATTR_PAX_FLAGS marks.
99558 +
99559 + If you enable both PT_PAX_FLAGS and XATTR_PAX_FLAGS support then you
99560 + must make sure that the marks are the same if a binary has both marks.
99561 +
99562 + If you enable none of the marking options then all applications
99563 + will run with PaX enabled on them by default.
99564 +
99565 +choice
99566 + prompt 'MAC system integration'
99567 + default PAX_HAVE_ACL_FLAGS
99568 + help
99569 + Mandatory Access Control systems have the option of controlling
99570 + PaX flags on a per executable basis, choose the method supported
99571 + by your particular system.
99572 +
99573 + - "none": if your MAC system does not interact with PaX,
99574 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
99575 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
99576 +
99577 + NOTE: this option is for developers/integrators only.
99578 +
99579 + config PAX_NO_ACL_FLAGS
99580 + bool 'none'
99581 +
99582 + config PAX_HAVE_ACL_FLAGS
99583 + bool 'direct'
99584 +
99585 + config PAX_HOOK_ACL_FLAGS
99586 + bool 'hook'
99587 +endchoice
99588 +
99589 +endmenu
99590 +
99591 +menu "Non-executable pages"
99592 + depends on PAX
99593 +
99594 +config PAX_NOEXEC
99595 + bool "Enforce non-executable pages"
99596 + default y if GRKERNSEC_CONFIG_AUTO
99597 + depends on ALPHA || (ARM && (CPU_V6 || CPU_V6K || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86
99598 + help
99599 + By design some architectures do not allow for protecting memory
99600 + pages against execution or even if they do, Linux does not make
99601 + use of this feature. In practice this means that if a page is
99602 + readable (such as the stack or heap) it is also executable.
99603 +
99604 + There is a well known exploit technique that makes use of this
99605 + fact and a common programming mistake where an attacker can
99606 + introduce code of his choice somewhere in the attacked program's
99607 + memory (typically the stack or the heap) and then execute it.
99608 +
99609 + If the attacked program was running with different (typically
99610 + higher) privileges than that of the attacker, then he can elevate
99611 + his own privilege level (e.g. get a root shell, write to files for
99612 + which he does not have write access to, etc).
99613 +
99614 + Enabling this option will let you choose from various features
99615 + that prevent the injection and execution of 'foreign' code in
99616 + a program.
99617 +
99618 + This will also break programs that rely on the old behaviour and
99619 + expect that dynamically allocated memory via the malloc() family
99620 + of functions is executable (which it is not). Notable examples
99621 + are the XFree86 4.x server, the java runtime and wine.
99622 +
99623 +config PAX_PAGEEXEC
99624 + bool "Paging based non-executable pages"
99625 + default y if GRKERNSEC_CONFIG_AUTO
99626 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
99627 + select ARCH_TRACK_EXEC_LIMIT if X86_32
99628 + help
99629 + This implementation is based on the paging feature of the CPU.
99630 + On i386 without hardware non-executable bit support there is a
99631 + variable but usually low performance impact, however on Intel's
99632 + P4 core based CPUs it is very high so you should not enable this
99633 + for kernels meant to be used on such CPUs.
99634 +
99635 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
99636 + with hardware non-executable bit support there is no performance
99637 + impact, on ppc the impact is negligible.
99638 +
99639 + Note that several architectures require various emulations due to
99640 + badly designed userland ABIs, this will cause a performance impact
99641 + but will disappear as soon as userland is fixed. For example, ppc
99642 + userland MUST have been built with secure-plt by a recent toolchain.
99643 +
99644 +config PAX_SEGMEXEC
99645 + bool "Segmentation based non-executable pages"
99646 + default y if GRKERNSEC_CONFIG_AUTO
99647 + depends on PAX_NOEXEC && X86_32
99648 + help
99649 + This implementation is based on the segmentation feature of the
99650 + CPU and has a very small performance impact, however applications
99651 + will be limited to a 1.5 GB address space instead of the normal
99652 + 3 GB.
99653 +
99654 +config PAX_EMUTRAMP
99655 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
99656 + default y if PARISC
99657 + help
99658 + There are some programs and libraries that for one reason or
99659 + another attempt to execute special small code snippets from
99660 + non-executable memory pages. Most notable examples are the
99661 + signal handler return code generated by the kernel itself and
99662 + the GCC trampolines.
99663 +
99664 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
99665 + such programs will no longer work under your kernel.
99666 +
99667 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
99668 + utilities to enable trampoline emulation for the affected programs
99669 + yet still have the protection provided by the non-executable pages.
99670 +
99671 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
99672 + your system will not even boot.
99673 +
99674 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
99675 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
99676 + for the affected files.
99677 +
99678 + NOTE: enabling this feature *may* open up a loophole in the
99679 + protection provided by non-executable pages that an attacker
99680 + could abuse. Therefore the best solution is to not have any
99681 + files on your system that would require this option. This can
99682 + be achieved by not using libc5 (which relies on the kernel
99683 + signal handler return code) and not using or rewriting programs
99684 + that make use of the nested function implementation of GCC.
99685 + Skilled users can just fix GCC itself so that it implements
99686 + nested function calls in a way that does not interfere with PaX.
99687 +
99688 +config PAX_EMUSIGRT
99689 + bool "Automatically emulate sigreturn trampolines"
99690 + depends on PAX_EMUTRAMP && PARISC
99691 + default y
99692 + help
99693 + Enabling this option will have the kernel automatically detect
99694 + and emulate signal return trampolines executing on the stack
99695 + that would otherwise lead to task termination.
99696 +
99697 + This solution is intended as a temporary one for users with
99698 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
99699 + Modula-3 runtime, etc) or executables linked to such, basically
99700 + everything that does not specify its own SA_RESTORER function in
99701 + normal executable memory like glibc 2.1+ does.
99702 +
99703 + On parisc you MUST enable this option, otherwise your system will
99704 + not even boot.
99705 +
99706 + NOTE: this feature cannot be disabled on a per executable basis
99707 + and since it *does* open up a loophole in the protection provided
99708 + by non-executable pages, the best solution is to not have any
99709 + files on your system that would require this option.
99710 +
99711 +config PAX_MPROTECT
99712 + bool "Restrict mprotect()"
99713 + default y if GRKERNSEC_CONFIG_AUTO
99714 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
99715 + help
99716 + Enabling this option will prevent programs from
99717 + - changing the executable status of memory pages that were
99718 + not originally created as executable,
99719 + - making read-only executable pages writable again,
99720 + - creating executable pages from anonymous memory,
99721 + - making read-only-after-relocations (RELRO) data pages writable again.
99722 +
99723 + You should say Y here to complete the protection provided by
99724 + the enforcement of non-executable pages.
99725 +
99726 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
99727 + this feature on a per file basis.
99728 +
99729 +config PAX_MPROTECT_COMPAT
99730 + bool "Use legacy/compat protection demoting (read help)"
99731 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
99732 + depends on PAX_MPROTECT
99733 + help
99734 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
99735 + by sending the proper error code to the application. For some broken
99736 + userland, this can cause problems with Python or other applications. The
99737 + current implementation however allows for applications like clamav to
99738 + detect if JIT compilation/execution is allowed and to fall back gracefully
99739 + to an interpreter-based mode if it does not. While we encourage everyone
99740 + to use the current implementation as-is and push upstream to fix broken
99741 + userland (note that the RWX logging option can assist with this), in some
99742 + environments this may not be possible. Having to disable MPROTECT
99743 + completely on certain binaries reduces the security benefit of PaX,
99744 + so this option is provided for those environments to revert to the old
99745 + behavior.
99746 +
99747 +config PAX_ELFRELOCS
99748 + bool "Allow ELF text relocations (read help)"
99749 + depends on PAX_MPROTECT
99750 + default n
99751 + help
99752 + Non-executable pages and mprotect() restrictions are effective
99753 + in preventing the introduction of new executable code into an
99754 + attacked task's address space. There remain only two venues
99755 + for this kind of attack: if the attacker can execute already
99756 + existing code in the attacked task then he can either have it
99757 + create and mmap() a file containing his code or have it mmap()
99758 + an already existing ELF library that does not have position
99759 + independent code in it and use mprotect() on it to make it
99760 + writable and copy his code there. While protecting against
99761 + the former approach is beyond PaX, the latter can be prevented
99762 + by having only PIC ELF libraries on one's system (which do not
99763 + need to relocate their code). If you are sure this is your case,
99764 + as is the case with all modern Linux distributions, then leave
99765 + this option disabled. You should say 'n' here.
99766 +
99767 +config PAX_ETEXECRELOCS
99768 + bool "Allow ELF ET_EXEC text relocations"
99769 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
99770 + select PAX_ELFRELOCS
99771 + default y
99772 + help
99773 + On some architectures there are incorrectly created applications
99774 + that require text relocations and would not work without enabling
99775 + this option. If you are an alpha, ia64 or parisc user, you should
99776 + enable this option and disable it once you have made sure that
99777 + none of your applications need it.
99778 +
99779 +config PAX_EMUPLT
99780 + bool "Automatically emulate ELF PLT"
99781 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
99782 + default y
99783 + help
99784 + Enabling this option will have the kernel automatically detect
99785 + and emulate the Procedure Linkage Table entries in ELF files.
99786 + On some architectures such entries are in writable memory, and
99787 + become non-executable leading to task termination. Therefore
99788 + it is mandatory that you enable this option on alpha, parisc,
99789 + sparc and sparc64, otherwise your system would not even boot.
99790 +
99791 + NOTE: this feature *does* open up a loophole in the protection
99792 + provided by the non-executable pages, therefore the proper
99793 + solution is to modify the toolchain to produce a PLT that does
99794 + not need to be writable.
99795 +
99796 +config PAX_DLRESOLVE
99797 + bool 'Emulate old glibc resolver stub'
99798 + depends on PAX_EMUPLT && SPARC
99799 + default n
99800 + help
99801 + This option is needed if userland has an old glibc (before 2.4)
99802 + that puts a 'save' instruction into the runtime generated resolver
99803 + stub that needs special emulation.
99804 +
99805 +config PAX_KERNEXEC
99806 + bool "Enforce non-executable kernel pages"
99807 + default y if GRKERNSEC_CONFIG_AUTO && (GRKERNSEC_CONFIG_VIRT_NONE || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_GUEST) || (GRKERNSEC_CONFIG_VIRT_EPT && GRKERNSEC_CONFIG_VIRT_KVM))
99808 + depends on (X86 || (ARM && (CPU_V6 || CPU_V6K || CPU_V7) && !(ARM_LPAE && MODULES))) && !XEN
99809 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
99810 + select PAX_KERNEXEC_PLUGIN if X86_64
99811 + help
99812 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
99813 + that is, enabling this option will make it harder to inject
99814 + and execute 'foreign' code in kernel memory itself.
99815 +
99816 +choice
99817 + prompt "Return Address Instrumentation Method"
99818 + default PAX_KERNEXEC_PLUGIN_METHOD_BTS
99819 + depends on PAX_KERNEXEC_PLUGIN
99820 + help
99821 + Select the method used to instrument function pointer dereferences.
99822 + Note that binary modules cannot be instrumented by this approach.
99823 +
99824 + Note that the implementation requires a gcc with plugin support,
99825 + i.e., gcc 4.5 or newer. You may need to install the supporting
99826 + headers explicitly in addition to the normal gcc package.
99827 +
99828 + config PAX_KERNEXEC_PLUGIN_METHOD_BTS
99829 + bool "bts"
99830 + help
99831 + This method is compatible with binary only modules but has
99832 + a higher runtime overhead.
99833 +
99834 + config PAX_KERNEXEC_PLUGIN_METHOD_OR
99835 + bool "or"
99836 + depends on !PARAVIRT
99837 + help
99838 + This method is incompatible with binary only modules but has
99839 + a lower runtime overhead.
99840 +endchoice
99841 +
99842 +config PAX_KERNEXEC_PLUGIN_METHOD
99843 + string
99844 + default "bts" if PAX_KERNEXEC_PLUGIN_METHOD_BTS
99845 + default "or" if PAX_KERNEXEC_PLUGIN_METHOD_OR
99846 + default ""
99847 +
99848 +config PAX_KERNEXEC_MODULE_TEXT
99849 + int "Minimum amount of memory reserved for module code"
99850 + default "4" if (!GRKERNSEC_CONFIG_AUTO || GRKERNSEC_CONFIG_SERVER)
99851 + default "12" if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_DESKTOP)
99852 + depends on PAX_KERNEXEC && X86_32
99853 + help
99854 + Due to implementation details the kernel must reserve a fixed
99855 + amount of memory for runtime allocated code (such as modules)
99856 + at compile time that cannot be changed at runtime. Here you
99857 + can specify the minimum amount in MB that will be reserved.
99858 + Due to the same implementation details this size will always
99859 + be rounded up to the next 2/4 MB boundary (depends on PAE) so
99860 + the actually available memory for runtime allocated code will
99861 + usually be more than this minimum.
99862 +
99863 + The default 4 MB should be enough for most users but if you have
99864 + an excessive number of modules (e.g., most distribution configs
99865 + compile many drivers as modules) or use huge modules such as
99866 + nvidia's kernel driver, you will need to adjust this amount.
99867 + A good rule of thumb is to look at your currently loaded kernel
99868 + modules and add up their sizes.
99869 +
99870 +endmenu
99871 +
99872 +menu "Address Space Layout Randomization"
99873 + depends on PAX
99874 +
99875 +config PAX_ASLR
99876 + bool "Address Space Layout Randomization"
99877 + default y if GRKERNSEC_CONFIG_AUTO
99878 + help
99879 + Many if not most exploit techniques rely on the knowledge of
99880 + certain addresses in the attacked program. The following options
99881 + will allow the kernel to apply a certain amount of randomization
99882 + to specific parts of the program thereby forcing an attacker to
99883 + guess them in most cases. Any failed guess will most likely crash
99884 + the attacked program which allows the kernel to detect such attempts
99885 + and react on them. PaX itself provides no reaction mechanisms,
99886 + instead it is strongly encouraged that you make use of Nergal's
99887 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
99888 + (http://www.grsecurity.net/) built-in crash detection features or
99889 + develop one yourself.
99890 +
99891 + By saying Y here you can choose to randomize the following areas:
99892 + - top of the task's kernel stack
99893 + - top of the task's userland stack
99894 + - base address for mmap() requests that do not specify one
99895 + (this includes all libraries)
99896 + - base address of the main executable
99897 +
99898 + It is strongly recommended to say Y here as address space layout
99899 + randomization has negligible impact on performance yet it provides
99900 + a very effective protection.
99901 +
99902 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
99903 + this feature on a per file basis.
99904 +
99905 +config PAX_RANDKSTACK
99906 + bool "Randomize kernel stack base"
99907 + default y if GRKERNSEC_CONFIG_AUTO && !(GRKERNSEC_CONFIG_VIRT_HOST && GRKERNSEC_CONFIG_VIRT_VIRTUALBOX)
99908 + depends on X86_TSC && X86
99909 + help
99910 + By saying Y here the kernel will randomize every task's kernel
99911 + stack on every system call. This will not only force an attacker
99912 + to guess it but also prevent him from making use of possible
99913 + leaked information about it.
99914 +
99915 + Since the kernel stack is a rather scarce resource, randomization
99916 + may cause unexpected stack overflows, therefore you should very
99917 + carefully test your system. Note that once enabled in the kernel
99918 + configuration, this feature cannot be disabled on a per file basis.
99919 +
99920 +config PAX_RANDUSTACK
99921 + bool "Randomize user stack base"
99922 + default y if GRKERNSEC_CONFIG_AUTO
99923 + depends on PAX_ASLR
99924 + help
99925 + By saying Y here the kernel will randomize every task's userland
99926 + stack. The randomization is done in two steps where the second
99927 + one may apply a big amount of shift to the top of the stack and
99928 + cause problems for programs that want to use lots of memory (more
99929 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
99930 + For this reason the second step can be controlled by 'chpax' or
99931 + 'paxctl' on a per file basis.
99932 +
99933 +config PAX_RANDMMAP
99934 + bool "Randomize mmap() base"
99935 + default y if GRKERNSEC_CONFIG_AUTO
99936 + depends on PAX_ASLR
99937 + help
99938 + By saying Y here the kernel will use a randomized base address for
99939 + mmap() requests that do not specify one themselves. As a result
99940 + all dynamically loaded libraries will appear at random addresses
99941 + and therefore be harder to exploit by a technique where an attacker
99942 + attempts to execute library code for his purposes (e.g. spawn a
99943 + shell from an exploited program that is running at an elevated
99944 + privilege level).
99945 +
99946 + Furthermore, if a program is relinked as a dynamic ELF file, its
99947 + base address will be randomized as well, completing the full
99948 + randomization of the address space layout. Attacking such programs
99949 + becomes a guess game. You can find an example of doing this at
99950 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
99951 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
99952 +
99953 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
99954 + feature on a per file basis.
99955 +
99956 +endmenu
99957 +
99958 +menu "Miscellaneous hardening features"
99959 +
99960 +config PAX_MEMORY_SANITIZE
99961 + bool "Sanitize all freed memory"
99962 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
99963 + help
99964 + By saying Y here the kernel will erase memory pages and slab objects
99965 + as soon as they are freed. This in turn reduces the lifetime of data
99966 + stored in them, making it less likely that sensitive information such
99967 + as passwords, cryptographic secrets, etc stay in memory for too long.
99968 +
99969 + This is especially useful for programs whose runtime is short, long
99970 + lived processes and the kernel itself benefit from this as long as
99971 + they ensure timely freeing of memory that may hold sensitive
99972 + information.
99973 +
99974 + A nice side effect of the sanitization of slab objects is the
99975 + reduction of possible info leaks caused by padding bytes within the
99976 + leaky structures. Use-after-free bugs for structures containing
99977 + pointers can also be detected as dereferencing the sanitized pointer
99978 + will generate an access violation.
99979 +
99980 + The tradeoff is performance impact, on a single CPU system kernel
99981 + compilation sees a 3% slowdown, other systems and workloads may vary
99982 + and you are advised to test this feature on your expected workload
99983 + before deploying it.
99984 +
99985 + To reduce the performance penalty by sanitizing pages only, albeit
99986 + limiting the effectiveness of this feature at the same time, slab
99987 + sanitization can be disabled with the kernel commandline parameter
99988 + "pax_sanitize_slab=0".
99989 +
99990 + Note that this feature does not protect data stored in live pages,
99991 + e.g., process memory swapped to disk may stay there for a long time.
99992 +
99993 +config PAX_MEMORY_STACKLEAK
99994 + bool "Sanitize kernel stack"
99995 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
99996 + depends on X86
99997 + help
99998 + By saying Y here the kernel will erase the kernel stack before it
99999 + returns from a system call. This in turn reduces the information
100000 + that a kernel stack leak bug can reveal.
100001 +
100002 + Note that such a bug can still leak information that was put on
100003 + the stack by the current system call (the one eventually triggering
100004 + the bug) but traces of earlier system calls on the kernel stack
100005 + cannot leak anymore.
100006 +
100007 + The tradeoff is performance impact: on a single CPU system kernel
100008 + compilation sees a 1% slowdown, other systems and workloads may vary
100009 + and you are advised to test this feature on your expected workload
100010 + before deploying it.
100011 +
100012 + Note that the full feature requires a gcc with plugin support,
100013 + i.e., gcc 4.5 or newer. You may need to install the supporting
100014 + headers explicitly in addition to the normal gcc package. Using
100015 + older gcc versions means that functions with large enough stack
100016 + frames may leave uninitialized memory behind that may be exposed
100017 + to a later syscall leaking the stack.
100018 +
100019 +config PAX_MEMORY_STRUCTLEAK
100020 + bool "Forcibly initialize local variables copied to userland"
100021 + default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_PRIORITY_SECURITY)
100022 + help
100023 + By saying Y here the kernel will zero initialize some local
100024 + variables that are going to be copied to userland. This in
100025 + turn prevents unintended information leakage from the kernel
100026 + stack should later code forget to explicitly set all parts of
100027 + the copied variable.
100028 +
100029 + The tradeoff is less performance impact than PAX_MEMORY_STACKLEAK
100030 + at a much smaller coverage.
100031 +
100032 + Note that the implementation requires a gcc with plugin support,
100033 + i.e., gcc 4.5 or newer. You may need to install the supporting
100034 + headers explicitly in addition to the normal gcc package.
100035 +
100036 +config PAX_MEMORY_UDEREF
100037 + bool "Prevent invalid userland pointer dereference"
100038 + default y if GRKERNSEC_CONFIG_AUTO && !(X86_64 && GRKERNSEC_CONFIG_PRIORITY_PERF) && (GRKERNSEC_CONFIG_VIRT_NONE || GRKERNSEC_CONFIG_VIRT_EPT)
100039 + depends on (X86 || (ARM && (CPU_V6 || CPU_V6K || CPU_V7) && !ARM_LPAE)) && !UML_X86 && !XEN
100040 + select PAX_PER_CPU_PGD if X86_64
100041 + help
100042 + By saying Y here the kernel will be prevented from dereferencing
100043 + userland pointers in contexts where the kernel expects only kernel
100044 + pointers. This is both a useful runtime debugging feature and a
100045 + security measure that prevents exploiting a class of kernel bugs.
100046 +
100047 + The tradeoff is that some virtualization solutions may experience
100048 + a huge slowdown and therefore you should not enable this feature
100049 + for kernels meant to run in such environments. Whether a given VM
100050 + solution is affected or not is best determined by simply trying it
100051 + out, the performance impact will be obvious right on boot as this
100052 + mechanism engages from very early on. A good rule of thumb is that
100053 + VMs running on CPUs without hardware virtualization support (i.e.,
100054 + the majority of IA-32 CPUs) will likely experience the slowdown.
100055 +
100056 + On X86_64 the kernel will make use of PCID support when available
100057 + (Intel's Westmere, Sandy Bridge, etc) for better security (default)
100058 + or performance impact. Pass pax_weakuderef on the kernel command
100059 + line to choose the latter.
100060 +
100061 +config PAX_REFCOUNT
100062 + bool "Prevent various kernel object reference counter overflows"
100063 + default y if GRKERNSEC_CONFIG_AUTO
100064 + depends on GRKERNSEC && ((ARM && (CPU_V6 || CPU_V6K || CPU_V7)) || MIPS || SPARC64 || X86)
100065 + help
100066 + By saying Y here the kernel will detect and prevent overflowing
100067 + various (but not all) kinds of object reference counters. Such
100068 + overflows can normally occur due to bugs only and are often, if
100069 + not always, exploitable.
100070 +
100071 + The tradeoff is that data structures protected by an overflowed
100072 + refcount will never be freed and therefore will leak memory. Note
100073 + that this leak also happens even without this protection but in
100074 + that case the overflow can eventually trigger the freeing of the
100075 + data structure while it is still being used elsewhere, resulting
100076 + in the exploitable situation that this feature prevents.
100077 +
100078 + Since this has a negligible performance impact, you should enable
100079 + this feature.
100080 +
100081 +config PAX_CONSTIFY_PLUGIN
100082 + bool "Automatically constify eligible structures"
100083 + default y
100084 + depends on !UML && PAX_KERNEXEC
100085 + help
100086 + By saying Y here the compiler will automatically constify a class
100087 + of types that contain only function pointers. This reduces the
100088 + kernel's attack surface and also produces a better memory layout.
100089 +
100090 + Note that the implementation requires a gcc with plugin support,
100091 + i.e., gcc 4.5 or newer. You may need to install the supporting
100092 + headers explicitly in addition to the normal gcc package.
100093 +
100094 + Note that if some code really has to modify constified variables
100095 + then the source code will have to be patched to allow it. Examples
100096 + can be found in PaX itself (the no_const attribute) and for some
100097 + out-of-tree modules at http://www.grsecurity.net/~paxguy1/ .
100098 +
100099 +config PAX_USERCOPY
100100 + bool "Harden heap object copies between kernel and userland"
100101 + default y if GRKERNSEC_CONFIG_AUTO
100102 + depends on ARM || IA64 || PPC || SPARC || X86
100103 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
100104 + select PAX_USERCOPY_SLABS
100105 + help
100106 + By saying Y here the kernel will enforce the size of heap objects
100107 + when they are copied in either direction between the kernel and
100108 + userland, even if only a part of the heap object is copied.
100109 +
100110 + Specifically, this checking prevents information leaking from the
100111 + kernel heap during kernel to userland copies (if the kernel heap
100112 + object is otherwise fully initialized) and prevents kernel heap
100113 + overflows during userland to kernel copies.
100114 +
100115 + Note that the current implementation provides the strictest bounds
100116 + checks for the SLUB allocator.
100117 +
100118 + Enabling this option also enables per-slab cache protection against
100119 + data in a given cache being copied into/out of via userland
100120 + accessors. Though the whitelist of regions will be reduced over
100121 + time, it notably protects important data structures like task structs.
100122 +
100123 + If frame pointers are enabled on x86, this option will also restrict
100124 + copies into and out of the kernel stack to local variables within a
100125 + single frame.
100126 +
100127 + Since this has a negligible performance impact, you should enable
100128 + this feature.
100129 +
100130 +config PAX_USERCOPY_DEBUG
100131 + bool
100132 + depends on X86 && PAX_USERCOPY
100133 + default n
100134 +
100135 +config PAX_SIZE_OVERFLOW
100136 + bool "Prevent various integer overflows in function size parameters"
100137 + default y if GRKERNSEC_CONFIG_AUTO
100138 + depends on X86
100139 + help
100140 + By saying Y here the kernel recomputes expressions of function
100141 + arguments marked by a size_overflow attribute with double integer
100142 + precision (DImode/TImode for 32/64 bit integer types).
100143 +
100144 + The recomputed argument is checked against TYPE_MAX and an event
100145 + is logged on overflow and the triggering process is killed.
100146 +
100147 + Homepage: http://www.grsecurity.net/~ephox/overflow_plugin/
100148 +
100149 + Note that the implementation requires a gcc with plugin support,
100150 + i.e., gcc 4.5 or newer. You may need to install the supporting
100151 + headers explicitly in addition to the normal gcc package.
100152 +
100153 +config PAX_LATENT_ENTROPY
100154 + bool "Generate some entropy during boot and runtime"
100155 + default y if GRKERNSEC_CONFIG_AUTO
100156 + help
100157 + By saying Y here the kernel will instrument some kernel code to
100158 + extract some entropy from both original and artificially created
100159 + program state. This will help especially embedded systems where
100160 + there is little 'natural' source of entropy normally. The cost
100161 + is some slowdown of the boot process and fork and irq processing.
100162 +
100163 + When pax_extra_latent_entropy is passed on the kernel command line,
100164 + entropy will be extracted from up to the first 4GB of RAM while the
100165 + runtime memory allocator is being initialized. This costs even more
100166 + slowdown of the boot process.
100167 +
100168 + Note that the implementation requires a gcc with plugin support,
100169 + i.e., gcc 4.5 or newer. You may need to install the supporting
100170 + headers explicitly in addition to the normal gcc package.
100171 +
100172 + Note that entropy extracted this way is not cryptographically
100173 + secure!
100174 +
100175 +endmenu
100176 +
100177 +endmenu
100178 +
100179 +source grsecurity/Kconfig
100180 +
100181 +endmenu
100182 +
100183 +endmenu
100184 +
100185 source security/keys/Kconfig
100186
100187 config SECURITY_DMESG_RESTRICT
100188 @@ -103,7 +1056,7 @@ config INTEL_TXT
100189 config LSM_MMAP_MIN_ADDR
100190 int "Low address space for LSM to protect from user allocation"
100191 depends on SECURITY && SECURITY_SELINUX
100192 - default 32768 if ARM
100193 + default 32768 if ALPHA || ARM || PARISC || SPARC32
100194 default 65536
100195 help
100196 This is the portion of low virtual memory which should be protected
100197 diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
100198 index fb99e18..b3e0bbb 100644
100199 --- a/security/apparmor/lsm.c
100200 +++ b/security/apparmor/lsm.c
100201 @@ -615,7 +615,7 @@ static int apparmor_task_setrlimit(struct task_struct *task,
100202 return error;
100203 }
100204
100205 -static struct security_operations apparmor_ops = {
100206 +static struct security_operations apparmor_ops __read_only = {
100207 .name = "apparmor",
100208
100209 .ptrace_access_check = apparmor_ptrace_access_check,
100210 diff --git a/security/commoncap.c b/security/commoncap.c
100211 index b9d613e..f68305c 100644
100212 --- a/security/commoncap.c
100213 +++ b/security/commoncap.c
100214 @@ -424,6 +424,32 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
100215 return 0;
100216 }
100217
100218 +/* returns:
100219 + 1 for suid privilege
100220 + 2 for sgid privilege
100221 + 3 for fscap privilege
100222 +*/
100223 +int is_privileged_binary(const struct dentry *dentry)
100224 +{
100225 + struct cpu_vfs_cap_data capdata;
100226 + struct inode *inode = dentry->d_inode;
100227 +
100228 + if (!inode || S_ISDIR(inode->i_mode))
100229 + return 0;
100230 +
100231 + if (inode->i_mode & S_ISUID)
100232 + return 1;
100233 + if ((inode->i_mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))
100234 + return 2;
100235 +
100236 + if (!get_vfs_caps_from_disk(dentry, &capdata)) {
100237 + if (!cap_isclear(capdata.inheritable) || !cap_isclear(capdata.permitted))
100238 + return 3;
100239 + }
100240 +
100241 + return 0;
100242 +}
100243 +
100244 /*
100245 * Attempt to get the on-exec apply capability sets for an executable file from
100246 * its xattrs and, if present, apply them to the proposed credentials being
100247 @@ -592,6 +618,9 @@ int cap_bprm_secureexec(struct linux_binprm *bprm)
100248 const struct cred *cred = current_cred();
100249 kuid_t root_uid = make_kuid(cred->user_ns, 0);
100250
100251 + if (gr_acl_enable_at_secure())
100252 + return 1;
100253 +
100254 if (!uid_eq(cred->uid, root_uid)) {
100255 if (bprm->cap_effective)
100256 return 1;
100257 diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
100258 index b3dd616..4bb17f3 100644
100259 --- a/security/integrity/ima/ima.h
100260 +++ b/security/integrity/ima/ima.h
100261 @@ -83,8 +83,8 @@ int ima_init_crypto(void);
100262 extern spinlock_t ima_queue_lock;
100263
100264 struct ima_h_table {
100265 - atomic_long_t len; /* number of stored measurements in the list */
100266 - atomic_long_t violations;
100267 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
100268 + atomic_long_unchecked_t violations;
100269 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
100270 };
100271 extern struct ima_h_table ima_htable;
100272 diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
100273 index 1c03e8f1..398a941 100644
100274 --- a/security/integrity/ima/ima_api.c
100275 +++ b/security/integrity/ima/ima_api.c
100276 @@ -79,7 +79,7 @@ void ima_add_violation(struct inode *inode, const unsigned char *filename,
100277 int result;
100278
100279 /* can overflow, only indicator */
100280 - atomic_long_inc(&ima_htable.violations);
100281 + atomic_long_inc_unchecked(&ima_htable.violations);
100282
100283 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
100284 if (!entry) {
100285 diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
100286 index 38477c9..87a60c7 100644
100287 --- a/security/integrity/ima/ima_fs.c
100288 +++ b/security/integrity/ima/ima_fs.c
100289 @@ -28,12 +28,12 @@
100290 static int valid_policy = 1;
100291 #define TMPBUFLEN 12
100292 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
100293 - loff_t *ppos, atomic_long_t *val)
100294 + loff_t *ppos, atomic_long_unchecked_t *val)
100295 {
100296 char tmpbuf[TMPBUFLEN];
100297 ssize_t len;
100298
100299 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
100300 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
100301 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
100302 }
100303
100304 diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
100305 index ff63fe0..809cd96 100644
100306 --- a/security/integrity/ima/ima_queue.c
100307 +++ b/security/integrity/ima/ima_queue.c
100308 @@ -80,7 +80,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
100309 INIT_LIST_HEAD(&qe->later);
100310 list_add_tail_rcu(&qe->later, &ima_measurements);
100311
100312 - atomic_long_inc(&ima_htable.len);
100313 + atomic_long_inc_unchecked(&ima_htable.len);
100314 key = ima_hash_key(entry->digest);
100315 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
100316 return 0;
100317 diff --git a/security/keys/compat.c b/security/keys/compat.c
100318 index d65fa7f..cbfe366 100644
100319 --- a/security/keys/compat.c
100320 +++ b/security/keys/compat.c
100321 @@ -44,7 +44,7 @@ static long compat_keyctl_instantiate_key_iov(
100322 if (ret == 0)
100323 goto no_payload_free;
100324
100325 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
100326 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
100327 err:
100328 if (iov != iovstack)
100329 kfree(iov);
100330 diff --git a/security/keys/internal.h b/security/keys/internal.h
100331 index d4f1468..cc52f92 100644
100332 --- a/security/keys/internal.h
100333 +++ b/security/keys/internal.h
100334 @@ -242,7 +242,7 @@ extern long keyctl_instantiate_key_iov(key_serial_t,
100335 extern long keyctl_invalidate_key(key_serial_t);
100336
100337 extern long keyctl_instantiate_key_common(key_serial_t,
100338 - const struct iovec *,
100339 + const struct iovec __user *,
100340 unsigned, size_t, key_serial_t);
100341
100342 /*
100343 diff --git a/security/keys/key.c b/security/keys/key.c
100344 index 8fb7c7b..ba3610d 100644
100345 --- a/security/keys/key.c
100346 +++ b/security/keys/key.c
100347 @@ -284,7 +284,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
100348
100349 atomic_set(&key->usage, 1);
100350 init_rwsem(&key->sem);
100351 - lockdep_set_class(&key->sem, &type->lock_class);
100352 + lockdep_set_class(&key->sem, (struct lock_class_key *)&type->lock_class);
100353 key->type = type;
100354 key->user = user;
100355 key->quotalen = quotalen;
100356 @@ -1032,7 +1032,9 @@ int register_key_type(struct key_type *ktype)
100357 struct key_type *p;
100358 int ret;
100359
100360 - memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));
100361 + pax_open_kernel();
100362 + memset((void *)&ktype->lock_class, 0, sizeof(ktype->lock_class));
100363 + pax_close_kernel();
100364
100365 ret = -EEXIST;
100366 down_write(&key_types_sem);
100367 @@ -1044,7 +1046,7 @@ int register_key_type(struct key_type *ktype)
100368 }
100369
100370 /* store the type */
100371 - list_add(&ktype->link, &key_types_list);
100372 + pax_list_add((struct list_head *)&ktype->link, &key_types_list);
100373
100374 pr_notice("Key type %s registered\n", ktype->name);
100375 ret = 0;
100376 @@ -1066,7 +1068,7 @@ EXPORT_SYMBOL(register_key_type);
100377 void unregister_key_type(struct key_type *ktype)
100378 {
100379 down_write(&key_types_sem);
100380 - list_del_init(&ktype->link);
100381 + pax_list_del_init((struct list_head *)&ktype->link);
100382 downgrade_write(&key_types_sem);
100383 key_gc_keytype(ktype);
100384 pr_notice("Key type %s unregistered\n", ktype->name);
100385 @@ -1084,10 +1086,10 @@ void __init key_init(void)
100386 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
100387
100388 /* add the special key types */
100389 - list_add_tail(&key_type_keyring.link, &key_types_list);
100390 - list_add_tail(&key_type_dead.link, &key_types_list);
100391 - list_add_tail(&key_type_user.link, &key_types_list);
100392 - list_add_tail(&key_type_logon.link, &key_types_list);
100393 + pax_list_add_tail((struct list_head *)&key_type_keyring.link, &key_types_list);
100394 + pax_list_add_tail((struct list_head *)&key_type_dead.link, &key_types_list);
100395 + pax_list_add_tail((struct list_head *)&key_type_user.link, &key_types_list);
100396 + pax_list_add_tail((struct list_head *)&key_type_logon.link, &key_types_list);
100397
100398 /* record the root user tracking */
100399 rb_link_node(&root_key_user.node,
100400 diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
100401 index 33cfd27..842fc5a 100644
100402 --- a/security/keys/keyctl.c
100403 +++ b/security/keys/keyctl.c
100404 @@ -987,7 +987,7 @@ static int keyctl_change_reqkey_auth(struct key *key)
100405 /*
100406 * Copy the iovec data from userspace
100407 */
100408 -static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
100409 +static long copy_from_user_iovec(void *buffer, const struct iovec __user *iov,
100410 unsigned ioc)
100411 {
100412 for (; ioc > 0; ioc--) {
100413 @@ -1009,7 +1009,7 @@ static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
100414 * If successful, 0 will be returned.
100415 */
100416 long keyctl_instantiate_key_common(key_serial_t id,
100417 - const struct iovec *payload_iov,
100418 + const struct iovec __user *payload_iov,
100419 unsigned ioc,
100420 size_t plen,
100421 key_serial_t ringid)
100422 @@ -1104,7 +1104,7 @@ long keyctl_instantiate_key(key_serial_t id,
100423 [0].iov_len = plen
100424 };
100425
100426 - return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
100427 + return keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, 1, plen, ringid);
100428 }
100429
100430 return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
100431 @@ -1137,7 +1137,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
100432 if (ret == 0)
100433 goto no_payload_free;
100434
100435 - ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
100436 + ret = keyctl_instantiate_key_common(id, (const struct iovec __force_user *)iov, ioc, ret, ringid);
100437 err:
100438 if (iov != iovstack)
100439 kfree(iov);
100440 diff --git a/security/keys/keyring.c b/security/keys/keyring.c
100441 index 6ece7f2..ecdb55c 100644
100442 --- a/security/keys/keyring.c
100443 +++ b/security/keys/keyring.c
100444 @@ -227,16 +227,16 @@ static long keyring_read(const struct key *keyring,
100445 ret = -EFAULT;
100446
100447 for (loop = 0; loop < klist->nkeys; loop++) {
100448 + key_serial_t serial;
100449 key = rcu_deref_link_locked(klist, loop,
100450 keyring);
100451 + serial = key->serial;
100452
100453 tmp = sizeof(key_serial_t);
100454 if (tmp > buflen)
100455 tmp = buflen;
100456
100457 - if (copy_to_user(buffer,
100458 - &key->serial,
100459 - tmp) != 0)
100460 + if (copy_to_user(buffer, &serial, tmp))
100461 goto error;
100462
100463 buflen -= tmp;
100464 diff --git a/security/min_addr.c b/security/min_addr.c
100465 index f728728..6457a0c 100644
100466 --- a/security/min_addr.c
100467 +++ b/security/min_addr.c
100468 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG_DEFAULT_MMAP_MIN_ADDR;
100469 */
100470 static void update_mmap_min_addr(void)
100471 {
100472 +#ifndef SPARC
100473 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
100474 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
100475 mmap_min_addr = dac_mmap_min_addr;
100476 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
100477 #else
100478 mmap_min_addr = dac_mmap_min_addr;
100479 #endif
100480 +#endif
100481 }
100482
100483 /*
100484 diff --git a/security/security.c b/security/security.c
100485 index 4dc31f4..eabcf41 100644
100486 --- a/security/security.c
100487 +++ b/security/security.c
100488 @@ -33,8 +33,8 @@
100489 static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
100490 CONFIG_DEFAULT_SECURITY;
100491
100492 -static struct security_operations *security_ops;
100493 -static struct security_operations default_security_ops = {
100494 +struct security_operations *security_ops __read_only;
100495 +struct security_operations default_security_ops __read_only = {
100496 .name = "default",
100497 };
100498
100499 @@ -73,11 +73,6 @@ int __init security_init(void)
100500 return 0;
100501 }
100502
100503 -void reset_security_ops(void)
100504 -{
100505 - security_ops = &default_security_ops;
100506 -}
100507 -
100508 /* Save user chosen LSM */
100509 static int __init choose_lsm(char *str)
100510 {
100511 diff --git a/security/selinux/avc.c b/security/selinux/avc.c
100512 index fc3e662..7844c60 100644
100513 --- a/security/selinux/avc.c
100514 +++ b/security/selinux/avc.c
100515 @@ -59,7 +59,7 @@ struct avc_node {
100516 struct avc_cache {
100517 struct hlist_head slots[AVC_CACHE_SLOTS]; /* head for avc_node->list */
100518 spinlock_t slots_lock[AVC_CACHE_SLOTS]; /* lock for writes */
100519 - atomic_t lru_hint; /* LRU hint for reclaim scan */
100520 + atomic_unchecked_t lru_hint; /* LRU hint for reclaim scan */
100521 atomic_t active_nodes;
100522 u32 latest_notif; /* latest revocation notification */
100523 };
100524 @@ -167,7 +167,7 @@ void __init avc_init(void)
100525 spin_lock_init(&avc_cache.slots_lock[i]);
100526 }
100527 atomic_set(&avc_cache.active_nodes, 0);
100528 - atomic_set(&avc_cache.lru_hint, 0);
100529 + atomic_set_unchecked(&avc_cache.lru_hint, 0);
100530
100531 avc_node_cachep = kmem_cache_create("avc_node", sizeof(struct avc_node),
100532 0, SLAB_PANIC, NULL);
100533 @@ -242,7 +242,7 @@ static inline int avc_reclaim_node(void)
100534 spinlock_t *lock;
100535
100536 for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
100537 - hvalue = atomic_inc_return(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
100538 + hvalue = atomic_inc_return_unchecked(&avc_cache.lru_hint) & (AVC_CACHE_SLOTS - 1);
100539 head = &avc_cache.slots[hvalue];
100540 lock = &avc_cache.slots_lock[hvalue];
100541
100542 diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
100543 index d9a78fd..ae3ad22 100644
100544 --- a/security/selinux/hooks.c
100545 +++ b/security/selinux/hooks.c
100546 @@ -4297,8 +4297,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
100547 }
100548 err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
100549 PEER__RECV, &ad);
100550 - if (err)
100551 + if (err) {
100552 selinux_netlbl_err(skb, err, 0);
100553 + return err;
100554 + }
100555 }
100556
100557 if (secmark_active) {
100558 @@ -5521,11 +5523,11 @@ static int selinux_setprocattr(struct task_struct *p,
100559 /* Check for ptracing, and update the task SID if ok.
100560 Otherwise, leave SID unchanged and fail. */
100561 ptsid = 0;
100562 - task_lock(p);
100563 + rcu_read_lock();
100564 tracer = ptrace_parent(p);
100565 if (tracer)
100566 ptsid = task_sid(tracer);
100567 - task_unlock(p);
100568 + rcu_read_unlock();
100569
100570 if (tracer) {
100571 error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
100572 @@ -5662,7 +5664,7 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer)
100573
100574 #endif
100575
100576 -static struct security_operations selinux_ops = {
100577 +static struct security_operations selinux_ops __read_only = {
100578 .name = "selinux",
100579
100580 .ptrace_access_check = selinux_ptrace_access_check,
100581 @@ -6014,6 +6016,9 @@ static void selinux_nf_ip_exit(void)
100582 #ifdef CONFIG_SECURITY_SELINUX_DISABLE
100583 static int selinux_disabled;
100584
100585 +extern struct security_operations *security_ops;
100586 +extern struct security_operations default_security_ops;
100587 +
100588 int selinux_disable(void)
100589 {
100590 if (ss_initialized) {
100591 @@ -6031,7 +6036,9 @@ int selinux_disable(void)
100592 selinux_disabled = 1;
100593 selinux_enabled = 0;
100594
100595 - reset_security_ops();
100596 + pax_open_kernel();
100597 + security_ops = &default_security_ops;
100598 + pax_close_kernel();
100599
100600 /* Try to destroy the avc node cache */
100601 avc_disable();
100602 diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
100603 index 6713f04..c57ecef 100644
100604 --- a/security/selinux/include/xfrm.h
100605 +++ b/security/selinux/include/xfrm.h
100606 @@ -52,7 +52,7 @@ static inline void selinux_xfrm_notify_policyload(void)
100607 {
100608 struct net *net;
100609
100610 - atomic_inc(&flow_cache_genid);
100611 + atomic_inc_unchecked(&flow_cache_genid);
100612 rtnl_lock();
100613 for_each_net(net)
100614 rt_genid_bump_all(net);
100615 diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
100616 index 8825375..97a623b 100644
100617 --- a/security/smack/smack_lsm.c
100618 +++ b/security/smack/smack_lsm.c
100619 @@ -3726,7 +3726,7 @@ static int smack_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
100620 return 0;
100621 }
100622
100623 -struct security_operations smack_ops = {
100624 +struct security_operations smack_ops __read_only = {
100625 .name = "smack",
100626
100627 .ptrace_access_check = smack_ptrace_access_check,
100628 diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
100629 index 390c646..f2f8db3 100644
100630 --- a/security/tomoyo/mount.c
100631 +++ b/security/tomoyo/mount.c
100632 @@ -118,6 +118,10 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r,
100633 type == tomoyo_mounts[TOMOYO_MOUNT_MOVE]) {
100634 need_dev = -1; /* dev_name is a directory */
100635 } else {
100636 + if (!capable(CAP_SYS_ADMIN)) {
100637 + error = -EPERM;
100638 + goto out;
100639 + }
100640 fstype = get_fs_type(type);
100641 if (!fstype) {
100642 error = -ENODEV;
100643 diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
100644 index f0b756e..b129202 100644
100645 --- a/security/tomoyo/tomoyo.c
100646 +++ b/security/tomoyo/tomoyo.c
100647 @@ -503,7 +503,7 @@ static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
100648 * tomoyo_security_ops is a "struct security_operations" which is used for
100649 * registering TOMOYO.
100650 */
100651 -static struct security_operations tomoyo_security_ops = {
100652 +static struct security_operations tomoyo_security_ops __read_only = {
100653 .name = "tomoyo",
100654 .cred_alloc_blank = tomoyo_cred_alloc_blank,
100655 .cred_prepare = tomoyo_cred_prepare,
100656 diff --git a/security/yama/Kconfig b/security/yama/Kconfig
100657 index 20ef514..4182bed 100644
100658 --- a/security/yama/Kconfig
100659 +++ b/security/yama/Kconfig
100660 @@ -1,6 +1,6 @@
100661 config SECURITY_YAMA
100662 bool "Yama support"
100663 - depends on SECURITY
100664 + depends on SECURITY && !GRKERNSEC
100665 select SECURITYFS
100666 select SECURITY_PATH
100667 default n
100668 diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
100669 index 13c88fbc..f8c115e 100644
100670 --- a/security/yama/yama_lsm.c
100671 +++ b/security/yama/yama_lsm.c
100672 @@ -365,7 +365,7 @@ int yama_ptrace_traceme(struct task_struct *parent)
100673 }
100674
100675 #ifndef CONFIG_SECURITY_YAMA_STACKED
100676 -static struct security_operations yama_ops = {
100677 +static struct security_operations yama_ops __read_only = {
100678 .name = "yama",
100679
100680 .ptrace_access_check = yama_ptrace_access_check,
100681 @@ -376,28 +376,24 @@ static struct security_operations yama_ops = {
100682 #endif
100683
100684 #ifdef CONFIG_SYSCTL
100685 +static int zero __read_only;
100686 +static int max_scope __read_only = YAMA_SCOPE_NO_ATTACH;
100687 +
100688 static int yama_dointvec_minmax(struct ctl_table *table, int write,
100689 void __user *buffer, size_t *lenp, loff_t *ppos)
100690 {
100691 - int rc;
100692 + ctl_table_no_const yama_table;
100693
100694 if (write && !capable(CAP_SYS_PTRACE))
100695 return -EPERM;
100696
100697 - rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
100698 - if (rc)
100699 - return rc;
100700 -
100701 + yama_table = *table;
100702 /* Lock the max value if it ever gets set. */
100703 - if (write && *(int *)table->data == *(int *)table->extra2)
100704 - table->extra1 = table->extra2;
100705 -
100706 - return rc;
100707 + if (ptrace_scope == max_scope)
100708 + yama_table.extra1 = &max_scope;
100709 + return proc_dointvec_minmax(&yama_table, write, buffer, lenp, ppos);
100710 }
100711
100712 -static int zero;
100713 -static int max_scope = YAMA_SCOPE_NO_ATTACH;
100714 -
100715 struct ctl_path yama_sysctl_path[] = {
100716 { .procname = "kernel", },
100717 { .procname = "yama", },
100718 diff --git a/sound/aoa/codecs/onyx.c b/sound/aoa/codecs/onyx.c
100719 index 4cedc69..e59d8a3 100644
100720 --- a/sound/aoa/codecs/onyx.c
100721 +++ b/sound/aoa/codecs/onyx.c
100722 @@ -54,7 +54,7 @@ struct onyx {
100723 spdif_locked:1,
100724 analog_locked:1,
100725 original_mute:2;
100726 - int open_count;
100727 + local_t open_count;
100728 struct codec_info *codec_info;
100729
100730 /* mutex serializes concurrent access to the device
100731 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_item *cii,
100732 struct onyx *onyx = cii->codec_data;
100733
100734 mutex_lock(&onyx->mutex);
100735 - onyx->open_count++;
100736 + local_inc(&onyx->open_count);
100737 mutex_unlock(&onyx->mutex);
100738
100739 return 0;
100740 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_item *cii,
100741 struct onyx *onyx = cii->codec_data;
100742
100743 mutex_lock(&onyx->mutex);
100744 - onyx->open_count--;
100745 - if (!onyx->open_count)
100746 + if (local_dec_and_test(&onyx->open_count))
100747 onyx->spdif_locked = onyx->analog_locked = 0;
100748 mutex_unlock(&onyx->mutex);
100749
100750 diff --git a/sound/aoa/codecs/onyx.h b/sound/aoa/codecs/onyx.h
100751 index ffd2025..df062c9 100644
100752 --- a/sound/aoa/codecs/onyx.h
100753 +++ b/sound/aoa/codecs/onyx.h
100754 @@ -11,6 +11,7 @@
100755 #include <linux/i2c.h>
100756 #include <asm/pmac_low_i2c.h>
100757 #include <asm/prom.h>
100758 +#include <asm/local.h>
100759
100760 /* PCM3052 register definitions */
100761
100762 diff --git a/sound/core/oss/pcm_oss.c b/sound/core/oss/pcm_oss.c
100763 index 4c1cc51..16040040 100644
100764 --- a/sound/core/oss/pcm_oss.c
100765 +++ b/sound/core/oss/pcm_oss.c
100766 @@ -1189,10 +1189,10 @@ snd_pcm_sframes_t snd_pcm_oss_write3(struct snd_pcm_substream *substream, const
100767 if (in_kernel) {
100768 mm_segment_t fs;
100769 fs = snd_enter_user();
100770 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
100771 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
100772 snd_leave_user(fs);
100773 } else {
100774 - ret = snd_pcm_lib_write(substream, (void __force __user *)ptr, frames);
100775 + ret = snd_pcm_lib_write(substream, (void __force_user *)ptr, frames);
100776 }
100777 if (ret != -EPIPE && ret != -ESTRPIPE)
100778 break;
100779 @@ -1234,10 +1234,10 @@ snd_pcm_sframes_t snd_pcm_oss_read3(struct snd_pcm_substream *substream, char *p
100780 if (in_kernel) {
100781 mm_segment_t fs;
100782 fs = snd_enter_user();
100783 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
100784 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
100785 snd_leave_user(fs);
100786 } else {
100787 - ret = snd_pcm_lib_read(substream, (void __force __user *)ptr, frames);
100788 + ret = snd_pcm_lib_read(substream, (void __force_user *)ptr, frames);
100789 }
100790 if (ret == -EPIPE) {
100791 if (runtime->status->state == SNDRV_PCM_STATE_DRAINING) {
100792 @@ -1337,7 +1337,7 @@ static ssize_t snd_pcm_oss_write2(struct snd_pcm_substream *substream, const cha
100793 struct snd_pcm_plugin_channel *channels;
100794 size_t oss_frame_bytes = (runtime->oss.plugin_first->src_width * runtime->oss.plugin_first->src_format.channels) / 8;
100795 if (!in_kernel) {
100796 - if (copy_from_user(runtime->oss.buffer, (const char __force __user *)buf, bytes))
100797 + if (copy_from_user(runtime->oss.buffer, (const char __force_user *)buf, bytes))
100798 return -EFAULT;
100799 buf = runtime->oss.buffer;
100800 }
100801 @@ -1407,7 +1407,7 @@ static ssize_t snd_pcm_oss_write1(struct snd_pcm_substream *substream, const cha
100802 }
100803 } else {
100804 tmp = snd_pcm_oss_write2(substream,
100805 - (const char __force *)buf,
100806 + (const char __force_kernel *)buf,
100807 runtime->oss.period_bytes, 0);
100808 if (tmp <= 0)
100809 goto err;
100810 @@ -1433,7 +1433,7 @@ static ssize_t snd_pcm_oss_read2(struct snd_pcm_substream *substream, char *buf,
100811 struct snd_pcm_runtime *runtime = substream->runtime;
100812 snd_pcm_sframes_t frames, frames1;
100813 #ifdef CONFIG_SND_PCM_OSS_PLUGINS
100814 - char __user *final_dst = (char __force __user *)buf;
100815 + char __user *final_dst = (char __force_user *)buf;
100816 if (runtime->oss.plugin_first) {
100817 struct snd_pcm_plugin_channel *channels;
100818 size_t oss_frame_bytes = (runtime->oss.plugin_last->dst_width * runtime->oss.plugin_last->dst_format.channels) / 8;
100819 @@ -1495,7 +1495,7 @@ static ssize_t snd_pcm_oss_read1(struct snd_pcm_substream *substream, char __use
100820 xfer += tmp;
100821 runtime->oss.buffer_used -= tmp;
100822 } else {
100823 - tmp = snd_pcm_oss_read2(substream, (char __force *)buf,
100824 + tmp = snd_pcm_oss_read2(substream, (char __force_kernel *)buf,
100825 runtime->oss.period_bytes, 0);
100826 if (tmp <= 0)
100827 goto err;
100828 @@ -1663,7 +1663,7 @@ static int snd_pcm_oss_sync(struct snd_pcm_oss_file *pcm_oss_file)
100829 size1);
100830 size1 /= runtime->channels; /* frames */
100831 fs = snd_enter_user();
100832 - snd_pcm_lib_write(substream, (void __force __user *)runtime->oss.buffer, size1);
100833 + snd_pcm_lib_write(substream, (void __force_user *)runtime->oss.buffer, size1);
100834 snd_leave_user(fs);
100835 }
100836 } else if (runtime->access == SNDRV_PCM_ACCESS_RW_NONINTERLEAVED) {
100837 diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
100838 index af49721..e85058e 100644
100839 --- a/sound/core/pcm_compat.c
100840 +++ b/sound/core/pcm_compat.c
100841 @@ -31,7 +31,7 @@ static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
100842 int err;
100843
100844 fs = snd_enter_user();
100845 - err = snd_pcm_delay(substream, &delay);
100846 + err = snd_pcm_delay(substream, (snd_pcm_sframes_t __force_user *)&delay);
100847 snd_leave_user(fs);
100848 if (err < 0)
100849 return err;
100850 diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
100851 index a68d4c6..72af3cf 100644
100852 --- a/sound/core/pcm_native.c
100853 +++ b/sound/core/pcm_native.c
100854 @@ -2809,11 +2809,11 @@ int snd_pcm_kernel_ioctl(struct snd_pcm_substream *substream,
100855 switch (substream->stream) {
100856 case SNDRV_PCM_STREAM_PLAYBACK:
100857 result = snd_pcm_playback_ioctl1(NULL, substream, cmd,
100858 - (void __user *)arg);
100859 + (void __force_user *)arg);
100860 break;
100861 case SNDRV_PCM_STREAM_CAPTURE:
100862 result = snd_pcm_capture_ioctl1(NULL, substream, cmd,
100863 - (void __user *)arg);
100864 + (void __force_user *)arg);
100865 break;
100866 default:
100867 result = -EINVAL;
100868 diff --git a/sound/core/seq/seq_device.c b/sound/core/seq/seq_device.c
100869 index 040c60e..989a19a 100644
100870 --- a/sound/core/seq/seq_device.c
100871 +++ b/sound/core/seq/seq_device.c
100872 @@ -64,7 +64,7 @@ struct ops_list {
100873 int argsize; /* argument size */
100874
100875 /* operators */
100876 - struct snd_seq_dev_ops ops;
100877 + struct snd_seq_dev_ops *ops;
100878
100879 /* registered devices */
100880 struct list_head dev_list; /* list of devices */
100881 @@ -333,7 +333,7 @@ int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
100882
100883 mutex_lock(&ops->reg_mutex);
100884 /* copy driver operators */
100885 - ops->ops = *entry;
100886 + ops->ops = entry;
100887 ops->driver |= DRIVER_LOADED;
100888 ops->argsize = argsize;
100889
100890 @@ -463,7 +463,7 @@ static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
100891 dev->name, ops->id, ops->argsize, dev->argsize);
100892 return -EINVAL;
100893 }
100894 - if (ops->ops.init_device(dev) >= 0) {
100895 + if (ops->ops->init_device(dev) >= 0) {
100896 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
100897 ops->num_init_devices++;
100898 } else {
100899 @@ -490,7 +490,7 @@ static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
100900 dev->name, ops->id, ops->argsize, dev->argsize);
100901 return -EINVAL;
100902 }
100903 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
100904 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
100905 dev->status = SNDRV_SEQ_DEVICE_FREE;
100906 dev->driver_data = NULL;
100907 ops->num_init_devices--;
100908 diff --git a/sound/core/sound.c b/sound/core/sound.c
100909 index f002bd9..c462985 100644
100910 --- a/sound/core/sound.c
100911 +++ b/sound/core/sound.c
100912 @@ -86,7 +86,7 @@ static void snd_request_other(int minor)
100913 case SNDRV_MINOR_TIMER: str = "snd-timer"; break;
100914 default: return;
100915 }
100916 - request_module(str);
100917 + request_module("%s", str);
100918 }
100919
100920 #endif /* modular kernel */
100921 diff --git a/sound/drivers/mts64.c b/sound/drivers/mts64.c
100922 index 4e0dd22..7a1f32c 100644
100923 --- a/sound/drivers/mts64.c
100924 +++ b/sound/drivers/mts64.c
100925 @@ -29,6 +29,7 @@
100926 #include <sound/initval.h>
100927 #include <sound/rawmidi.h>
100928 #include <sound/control.h>
100929 +#include <asm/local.h>
100930
100931 #define CARD_NAME "Miditerminal 4140"
100932 #define DRIVER_NAME "MTS64"
100933 @@ -67,7 +68,7 @@ struct mts64 {
100934 struct pardevice *pardev;
100935 int pardev_claimed;
100936
100937 - int open_count;
100938 + local_t open_count;
100939 int current_midi_output_port;
100940 int current_midi_input_port;
100941 u8 mode[MTS64_NUM_INPUT_PORTS];
100942 @@ -697,7 +698,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
100943 {
100944 struct mts64 *mts = substream->rmidi->private_data;
100945
100946 - if (mts->open_count == 0) {
100947 + if (local_read(&mts->open_count) == 0) {
100948 /* We don't need a spinlock here, because this is just called
100949 if the device has not been opened before.
100950 So there aren't any IRQs from the device */
100951 @@ -705,7 +706,7 @@ static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream)
100952
100953 msleep(50);
100954 }
100955 - ++(mts->open_count);
100956 + local_inc(&mts->open_count);
100957
100958 return 0;
100959 }
100960 @@ -715,8 +716,7 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
100961 struct mts64 *mts = substream->rmidi->private_data;
100962 unsigned long flags;
100963
100964 - --(mts->open_count);
100965 - if (mts->open_count == 0) {
100966 + if (local_dec_return(&mts->open_count) == 0) {
100967 /* We need the spinlock_irqsave here because we can still
100968 have IRQs at this point */
100969 spin_lock_irqsave(&mts->lock, flags);
100970 @@ -725,8 +725,8 @@ static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream)
100971
100972 msleep(500);
100973
100974 - } else if (mts->open_count < 0)
100975 - mts->open_count = 0;
100976 + } else if (local_read(&mts->open_count) < 0)
100977 + local_set(&mts->open_count, 0);
100978
100979 return 0;
100980 }
100981 diff --git a/sound/drivers/opl4/opl4_lib.c b/sound/drivers/opl4/opl4_lib.c
100982 index b953fb4..1999c01 100644
100983 --- a/sound/drivers/opl4/opl4_lib.c
100984 +++ b/sound/drivers/opl4/opl4_lib.c
100985 @@ -29,7 +29,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>");
100986 MODULE_DESCRIPTION("OPL4 driver");
100987 MODULE_LICENSE("GPL");
100988
100989 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
100990 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
100991 {
100992 int timeout = 10;
100993 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
100994 diff --git a/sound/drivers/portman2x4.c b/sound/drivers/portman2x4.c
100995 index 991018d..8984740 100644
100996 --- a/sound/drivers/portman2x4.c
100997 +++ b/sound/drivers/portman2x4.c
100998 @@ -48,6 +48,7 @@
100999 #include <sound/initval.h>
101000 #include <sound/rawmidi.h>
101001 #include <sound/control.h>
101002 +#include <asm/local.h>
101003
101004 #define CARD_NAME "Portman 2x4"
101005 #define DRIVER_NAME "portman"
101006 @@ -85,7 +86,7 @@ struct portman {
101007 struct pardevice *pardev;
101008 int pardev_claimed;
101009
101010 - int open_count;
101011 + local_t open_count;
101012 int mode[PORTMAN_NUM_INPUT_PORTS];
101013 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
101014 };
101015 diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
101016 index ea995af..f1bfa37 100644
101017 --- a/sound/firewire/amdtp.c
101018 +++ b/sound/firewire/amdtp.c
101019 @@ -389,7 +389,7 @@ static void queue_out_packet(struct amdtp_out_stream *s, unsigned int cycle)
101020 ptr = s->pcm_buffer_pointer + data_blocks;
101021 if (ptr >= pcm->runtime->buffer_size)
101022 ptr -= pcm->runtime->buffer_size;
101023 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
101024 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
101025
101026 s->pcm_period_pointer += data_blocks;
101027 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
101028 @@ -557,7 +557,7 @@ EXPORT_SYMBOL(amdtp_out_stream_pcm_pointer);
101029 */
101030 void amdtp_out_stream_update(struct amdtp_out_stream *s)
101031 {
101032 - ACCESS_ONCE(s->source_node_id_field) =
101033 + ACCESS_ONCE_RW(s->source_node_id_field) =
101034 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
101035 }
101036 EXPORT_SYMBOL(amdtp_out_stream_update);
101037 diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
101038 index f6103d6..4843fbf 100644
101039 --- a/sound/firewire/amdtp.h
101040 +++ b/sound/firewire/amdtp.h
101041 @@ -138,7 +138,7 @@ static inline bool amdtp_out_streaming_error(struct amdtp_out_stream *s)
101042 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
101043 struct snd_pcm_substream *pcm)
101044 {
101045 - ACCESS_ONCE(s->pcm) = pcm;
101046 + ACCESS_ONCE_RW(s->pcm) = pcm;
101047 }
101048
101049 static inline bool cip_sfc_is_base_44100(enum cip_sfc sfc)
101050 diff --git a/sound/firewire/isight.c b/sound/firewire/isight.c
101051 index 58a5afe..af5bd64 100644
101052 --- a/sound/firewire/isight.c
101053 +++ b/sound/firewire/isight.c
101054 @@ -96,7 +96,7 @@ static void isight_update_pointers(struct isight *isight, unsigned int count)
101055 ptr += count;
101056 if (ptr >= runtime->buffer_size)
101057 ptr -= runtime->buffer_size;
101058 - ACCESS_ONCE(isight->buffer_pointer) = ptr;
101059 + ACCESS_ONCE_RW(isight->buffer_pointer) = ptr;
101060
101061 isight->period_counter += count;
101062 if (isight->period_counter >= runtime->period_size) {
101063 @@ -307,7 +307,7 @@ static int isight_hw_params(struct snd_pcm_substream *substream,
101064 if (err < 0)
101065 return err;
101066
101067 - ACCESS_ONCE(isight->pcm_active) = true;
101068 + ACCESS_ONCE_RW(isight->pcm_active) = true;
101069
101070 return 0;
101071 }
101072 @@ -340,7 +340,7 @@ static int isight_hw_free(struct snd_pcm_substream *substream)
101073 {
101074 struct isight *isight = substream->private_data;
101075
101076 - ACCESS_ONCE(isight->pcm_active) = false;
101077 + ACCESS_ONCE_RW(isight->pcm_active) = false;
101078
101079 mutex_lock(&isight->mutex);
101080 isight_stop_streaming(isight);
101081 @@ -433,10 +433,10 @@ static int isight_trigger(struct snd_pcm_substream *substream, int cmd)
101082
101083 switch (cmd) {
101084 case SNDRV_PCM_TRIGGER_START:
101085 - ACCESS_ONCE(isight->pcm_running) = true;
101086 + ACCESS_ONCE_RW(isight->pcm_running) = true;
101087 break;
101088 case SNDRV_PCM_TRIGGER_STOP:
101089 - ACCESS_ONCE(isight->pcm_running) = false;
101090 + ACCESS_ONCE_RW(isight->pcm_running) = false;
101091 break;
101092 default:
101093 return -EINVAL;
101094 diff --git a/sound/firewire/scs1x.c b/sound/firewire/scs1x.c
101095 index 505fc81..62e569e 100644
101096 --- a/sound/firewire/scs1x.c
101097 +++ b/sound/firewire/scs1x.c
101098 @@ -74,7 +74,7 @@ static void scs_output_trigger(struct snd_rawmidi_substream *stream, int up)
101099 {
101100 struct scs *scs = stream->rmidi->private_data;
101101
101102 - ACCESS_ONCE(scs->output) = up ? stream : NULL;
101103 + ACCESS_ONCE_RW(scs->output) = up ? stream : NULL;
101104 if (up) {
101105 scs->output_idle = false;
101106 tasklet_schedule(&scs->tasklet);
101107 @@ -257,7 +257,7 @@ static void scs_input_trigger(struct snd_rawmidi_substream *stream, int up)
101108 {
101109 struct scs *scs = stream->rmidi->private_data;
101110
101111 - ACCESS_ONCE(scs->input) = up ? stream : NULL;
101112 + ACCESS_ONCE_RW(scs->input) = up ? stream : NULL;
101113 }
101114
101115 static void scs_input_escaped_byte(struct snd_rawmidi_substream *stream,
101116 @@ -469,8 +469,8 @@ static void scs_remove(struct fw_unit *unit)
101117
101118 snd_card_disconnect(scs->card);
101119
101120 - ACCESS_ONCE(scs->output) = NULL;
101121 - ACCESS_ONCE(scs->input) = NULL;
101122 + ACCESS_ONCE_RW(scs->output) = NULL;
101123 + ACCESS_ONCE_RW(scs->input) = NULL;
101124
101125 wait_event(scs->idle_wait, scs->output_idle);
101126
101127 diff --git a/sound/oss/sb_audio.c b/sound/oss/sb_audio.c
101128 index 048439a..3be9f6f 100644
101129 --- a/sound/oss/sb_audio.c
101130 +++ b/sound/oss/sb_audio.c
101131 @@ -904,7 +904,7 @@ sb16_copy_from_user(int dev,
101132 buf16 = (signed short *)(localbuf + localoffs);
101133 while (c)
101134 {
101135 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
101136 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
101137 if (copy_from_user(lbuf8,
101138 userbuf+useroffs + p,
101139 locallen))
101140 diff --git a/sound/oss/swarm_cs4297a.c b/sound/oss/swarm_cs4297a.c
101141 index 7d8803a..559f8d0 100644
101142 --- a/sound/oss/swarm_cs4297a.c
101143 +++ b/sound/oss/swarm_cs4297a.c
101144 @@ -2621,7 +2621,6 @@ static int __init cs4297a_init(void)
101145 {
101146 struct cs4297a_state *s;
101147 u32 pwr, id;
101148 - mm_segment_t fs;
101149 int rval;
101150 #ifndef CONFIG_BCM_CS4297A_CSWARM
101151 u64 cfg;
101152 @@ -2711,22 +2710,23 @@ static int __init cs4297a_init(void)
101153 if (!rval) {
101154 char *sb1250_duart_present;
101155
101156 +#if 0
101157 + mm_segment_t fs;
101158 fs = get_fs();
101159 set_fs(KERNEL_DS);
101160 -#if 0
101161 val = SOUND_MASK_LINE;
101162 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
101163 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
101164 val = initvol[i].vol;
101165 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
101166 }
101167 + set_fs(fs);
101168 // cs4297a_write_ac97(s, 0x18, 0x0808);
101169 #else
101170 // cs4297a_write_ac97(s, 0x5e, 0x180);
101171 cs4297a_write_ac97(s, 0x02, 0x0808);
101172 cs4297a_write_ac97(s, 0x18, 0x0808);
101173 #endif
101174 - set_fs(fs);
101175
101176 list_add(&s->list, &cs4297a_devs);
101177
101178 diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
101179 index e938a68..2a728ad 100644
101180 --- a/sound/pci/hda/hda_codec.c
101181 +++ b/sound/pci/hda/hda_codec.c
101182 @@ -976,14 +976,10 @@ find_codec_preset(struct hda_codec *codec)
101183 mutex_unlock(&preset_mutex);
101184
101185 if (mod_requested < HDA_MODREQ_MAX_COUNT) {
101186 - char name[32];
101187 if (!mod_requested)
101188 - snprintf(name, sizeof(name), "snd-hda-codec-id:%08x",
101189 - codec->vendor_id);
101190 + request_module("snd-hda-codec-id:%08x", codec->vendor_id);
101191 else
101192 - snprintf(name, sizeof(name), "snd-hda-codec-id:%04x*",
101193 - (codec->vendor_id >> 16) & 0xffff);
101194 - request_module(name);
101195 + request_module("snd-hda-codec-id:%04x*", (codec->vendor_id >> 16) & 0xffff);
101196 mod_requested++;
101197 goto again;
101198 }
101199 diff --git a/sound/pci/ymfpci/ymfpci.h b/sound/pci/ymfpci/ymfpci.h
101200 index 4631a23..001ae57 100644
101201 --- a/sound/pci/ymfpci/ymfpci.h
101202 +++ b/sound/pci/ymfpci/ymfpci.h
101203 @@ -358,7 +358,7 @@ struct snd_ymfpci {
101204 spinlock_t reg_lock;
101205 spinlock_t voice_lock;
101206 wait_queue_head_t interrupt_sleep;
101207 - atomic_t interrupt_sleep_count;
101208 + atomic_unchecked_t interrupt_sleep_count;
101209 struct snd_info_entry *proc_entry;
101210 const struct firmware *dsp_microcode;
101211 const struct firmware *controller_microcode;
101212 diff --git a/sound/pci/ymfpci/ymfpci_main.c b/sound/pci/ymfpci/ymfpci_main.c
101213 index d591c15..8cb8f94 100644
101214 --- a/sound/pci/ymfpci/ymfpci_main.c
101215 +++ b/sound/pci/ymfpci/ymfpci_main.c
101216 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct snd_ymfpci *chip)
101217 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
101218 break;
101219 }
101220 - if (atomic_read(&chip->interrupt_sleep_count)) {
101221 - atomic_set(&chip->interrupt_sleep_count, 0);
101222 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
101223 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
101224 wake_up(&chip->interrupt_sleep);
101225 }
101226 __end:
101227 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct snd_ymfpci *chip)
101228 continue;
101229 init_waitqueue_entry(&wait, current);
101230 add_wait_queue(&chip->interrupt_sleep, &wait);
101231 - atomic_inc(&chip->interrupt_sleep_count);
101232 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
101233 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
101234 remove_wait_queue(&chip->interrupt_sleep, &wait);
101235 }
101236 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(int irq, void *dev_id)
101237 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
101238 spin_unlock(&chip->reg_lock);
101239
101240 - if (atomic_read(&chip->interrupt_sleep_count)) {
101241 - atomic_set(&chip->interrupt_sleep_count, 0);
101242 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
101243 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
101244 wake_up(&chip->interrupt_sleep);
101245 }
101246 }
101247 @@ -2421,7 +2421,7 @@ int snd_ymfpci_create(struct snd_card *card,
101248 spin_lock_init(&chip->reg_lock);
101249 spin_lock_init(&chip->voice_lock);
101250 init_waitqueue_head(&chip->interrupt_sleep);
101251 - atomic_set(&chip->interrupt_sleep_count, 0);
101252 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
101253 chip->card = card;
101254 chip->pci = pci;
101255 chip->irq = -1;
101256 diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
101257 index 6b81d0c..031c056 100644
101258 --- a/sound/soc/fsl/fsl_ssi.c
101259 +++ b/sound/soc/fsl/fsl_ssi.c
101260 @@ -864,7 +864,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
101261 {
101262 struct fsl_ssi_private *ssi_private;
101263 int ret = 0;
101264 - struct device_attribute *dev_attr = NULL;
101265 + device_attribute_no_const *dev_attr = NULL;
101266 struct device_node *np = pdev->dev.of_node;
101267 const char *p, *sprop;
101268 const uint32_t *iprop;
101269 diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
101270 index 1a38be0..66809d1 100644
101271 --- a/sound/soc/soc-core.c
101272 +++ b/sound/soc/soc-core.c
101273 @@ -2248,8 +2248,10 @@ int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
101274 if (ret)
101275 return ret;
101276
101277 - ops->warm_reset = snd_soc_ac97_warm_reset;
101278 - ops->reset = snd_soc_ac97_reset;
101279 + pax_open_kernel();
101280 + *(void **)&ops->warm_reset = snd_soc_ac97_warm_reset;
101281 + *(void **)&ops->reset = snd_soc_ac97_reset;
101282 + pax_close_kernel();
101283
101284 snd_ac97_rst_cfg = cfg;
101285 return 0;
101286 diff --git a/tools/gcc/.gitignore b/tools/gcc/.gitignore
101287 new file mode 100644
101288 index 0000000..50f2f2f
101289 --- /dev/null
101290 +++ b/tools/gcc/.gitignore
101291 @@ -0,0 +1 @@
101292 +size_overflow_hash.h
101293 diff --git a/tools/gcc/Makefile b/tools/gcc/Makefile
101294 new file mode 100644
101295 index 0000000..144dbee
101296 --- /dev/null
101297 +++ b/tools/gcc/Makefile
101298 @@ -0,0 +1,45 @@
101299 +#CC := gcc
101300 +#PLUGIN_SOURCE_FILES := pax_plugin.c
101301 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
101302 +GCCPLUGINS_DIR := $(shell $(CC) -print-file-name=plugin)
101303 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W -std=gnu99
101304 +
101305 +ifeq ($(PLUGINCC),$(HOSTCC))
101306 +HOSTLIBS := hostlibs
101307 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu99 -ggdb
101308 +else
101309 +HOSTLIBS := hostcxxlibs
101310 +HOST_EXTRACXXFLAGS += -I$(GCCPLUGINS_DIR)/include -std=gnu++98 -ggdb -Wno-unused-parameter
101311 +endif
101312 +
101313 +$(HOSTLIBS)-$(CONFIG_PAX_CONSTIFY_PLUGIN) := constify_plugin.so
101314 +$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STACKLEAK) += stackleak_plugin.so
101315 +$(HOSTLIBS)-$(CONFIG_KALLOCSTAT_PLUGIN) += kallocstat_plugin.so
101316 +$(HOSTLIBS)-$(CONFIG_PAX_KERNEXEC_PLUGIN) += kernexec_plugin.so
101317 +$(HOSTLIBS)-$(CONFIG_CHECKER_PLUGIN) += checker_plugin.so
101318 +$(HOSTLIBS)-y += colorize_plugin.so
101319 +$(HOSTLIBS)-$(CONFIG_PAX_SIZE_OVERFLOW) += size_overflow_plugin.so
101320 +$(HOSTLIBS)-$(CONFIG_PAX_LATENT_ENTROPY) += latent_entropy_plugin.so
101321 +$(HOSTLIBS)-$(CONFIG_PAX_MEMORY_STRUCTLEAK) += structleak_plugin.so
101322 +
101323 +always := $($(HOSTLIBS)-y)
101324 +
101325 +constify_plugin-objs := constify_plugin.o
101326 +stackleak_plugin-objs := stackleak_plugin.o
101327 +kallocstat_plugin-objs := kallocstat_plugin.o
101328 +kernexec_plugin-objs := kernexec_plugin.o
101329 +checker_plugin-objs := checker_plugin.o
101330 +colorize_plugin-objs := colorize_plugin.o
101331 +size_overflow_plugin-objs := size_overflow_plugin.o
101332 +latent_entropy_plugin-objs := latent_entropy_plugin.o
101333 +structleak_plugin-objs := structleak_plugin.o
101334 +
101335 +$(obj)/size_overflow_plugin.o: $(objtree)/$(obj)/size_overflow_hash.h
101336 +
101337 +quiet_cmd_build_size_overflow_hash = GENHASH $@
101338 + cmd_build_size_overflow_hash = \
101339 + $(CONFIG_SHELL) $(srctree)/$(src)/generate_size_overflow_hash.sh -d $< -o $@
101340 +$(objtree)/$(obj)/size_overflow_hash.h: $(src)/size_overflow_hash.data FORCE
101341 + $(call if_changed,build_size_overflow_hash)
101342 +
101343 +targets += size_overflow_hash.h
101344 diff --git a/tools/gcc/checker_plugin.c b/tools/gcc/checker_plugin.c
101345 new file mode 100644
101346 index 0000000..22f03c0
101347 --- /dev/null
101348 +++ b/tools/gcc/checker_plugin.c
101349 @@ -0,0 +1,172 @@
101350 +/*
101351 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
101352 + * Licensed under the GPL v2
101353 + *
101354 + * Note: the choice of the license means that the compilation process is
101355 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
101356 + * but for the kernel it doesn't matter since it doesn't link against
101357 + * any of the gcc libraries
101358 + *
101359 + * gcc plugin to implement various sparse (source code checker) features
101360 + *
101361 + * TODO:
101362 + * - define separate __iomem, __percpu and __rcu address spaces (lots of code to patch)
101363 + *
101364 + * BUGS:
101365 + * - none known
101366 + */
101367 +#include "gcc-plugin.h"
101368 +#include "config.h"
101369 +#include "system.h"
101370 +#include "coretypes.h"
101371 +#include "tree.h"
101372 +#include "tree-pass.h"
101373 +#include "flags.h"
101374 +#include "intl.h"
101375 +#include "toplev.h"
101376 +#include "plugin.h"
101377 +//#include "expr.h" where are you...
101378 +#include "diagnostic.h"
101379 +#include "plugin-version.h"
101380 +#include "tm.h"
101381 +#include "function.h"
101382 +#include "basic-block.h"
101383 +#include "gimple.h"
101384 +#include "rtl.h"
101385 +#include "emit-rtl.h"
101386 +#include "tree-flow.h"
101387 +#include "target.h"
101388 +
101389 +extern void c_register_addr_space (const char *str, addr_space_t as);
101390 +extern enum machine_mode default_addr_space_pointer_mode (addr_space_t);
101391 +extern enum machine_mode default_addr_space_address_mode (addr_space_t);
101392 +extern bool default_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as);
101393 +extern bool default_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as);
101394 +extern rtx default_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as);
101395 +
101396 +extern void print_gimple_stmt(FILE *, gimple, int, int);
101397 +extern rtx emit_move_insn(rtx x, rtx y);
101398 +
101399 +int plugin_is_GPL_compatible;
101400 +
101401 +static struct plugin_info checker_plugin_info = {
101402 + .version = "201111150100",
101403 + .help = NULL,
101404 +};
101405 +
101406 +#define ADDR_SPACE_KERNEL 0
101407 +#define ADDR_SPACE_FORCE_KERNEL 1
101408 +#define ADDR_SPACE_USER 2
101409 +#define ADDR_SPACE_FORCE_USER 3
101410 +#define ADDR_SPACE_IOMEM 0
101411 +#define ADDR_SPACE_FORCE_IOMEM 0
101412 +#define ADDR_SPACE_PERCPU 0
101413 +#define ADDR_SPACE_FORCE_PERCPU 0
101414 +#define ADDR_SPACE_RCU 0
101415 +#define ADDR_SPACE_FORCE_RCU 0
101416 +
101417 +static enum machine_mode checker_addr_space_pointer_mode(addr_space_t addrspace)
101418 +{
101419 + return default_addr_space_pointer_mode(ADDR_SPACE_GENERIC);
101420 +}
101421 +
101422 +static enum machine_mode checker_addr_space_address_mode(addr_space_t addrspace)
101423 +{
101424 + return default_addr_space_address_mode(ADDR_SPACE_GENERIC);
101425 +}
101426 +
101427 +static bool checker_addr_space_valid_pointer_mode(enum machine_mode mode, addr_space_t as)
101428 +{
101429 + return default_addr_space_valid_pointer_mode(mode, as);
101430 +}
101431 +
101432 +static bool checker_addr_space_legitimate_address_p(enum machine_mode mode, rtx mem, bool strict, addr_space_t as)
101433 +{
101434 + return default_addr_space_legitimate_address_p(mode, mem, strict, ADDR_SPACE_GENERIC);
101435 +}
101436 +
101437 +static rtx checker_addr_space_legitimize_address(rtx x, rtx oldx, enum machine_mode mode, addr_space_t as)
101438 +{
101439 + return default_addr_space_legitimize_address(x, oldx, mode, as);
101440 +}
101441 +
101442 +static bool checker_addr_space_subset_p(addr_space_t subset, addr_space_t superset)
101443 +{
101444 + if (subset == ADDR_SPACE_FORCE_KERNEL && superset == ADDR_SPACE_KERNEL)
101445 + return true;
101446 +
101447 + if (subset == ADDR_SPACE_FORCE_USER && superset == ADDR_SPACE_USER)
101448 + return true;
101449 +
101450 + if (subset == ADDR_SPACE_FORCE_IOMEM && superset == ADDR_SPACE_IOMEM)
101451 + return true;
101452 +
101453 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_USER)
101454 + return true;
101455 +
101456 + if (subset == ADDR_SPACE_KERNEL && superset == ADDR_SPACE_FORCE_IOMEM)
101457 + return true;
101458 +
101459 + if (subset == ADDR_SPACE_USER && superset == ADDR_SPACE_FORCE_KERNEL)
101460 + return true;
101461 +
101462 + if (subset == ADDR_SPACE_IOMEM && superset == ADDR_SPACE_FORCE_KERNEL)
101463 + return true;
101464 +
101465 + return subset == superset;
101466 +}
101467 +
101468 +static rtx checker_addr_space_convert(rtx op, tree from_type, tree to_type)
101469 +{
101470 +// addr_space_t from_as = TYPE_ADDR_SPACE(TREE_TYPE(from_type));
101471 +// addr_space_t to_as = TYPE_ADDR_SPACE(TREE_TYPE(to_type));
101472 +
101473 + return op;
101474 +}
101475 +
101476 +static void register_checker_address_spaces(void *event_data, void *data)
101477 +{
101478 + c_register_addr_space("__kernel", ADDR_SPACE_KERNEL);
101479 + c_register_addr_space("__force_kernel", ADDR_SPACE_FORCE_KERNEL);
101480 + c_register_addr_space("__user", ADDR_SPACE_USER);
101481 + c_register_addr_space("__force_user", ADDR_SPACE_FORCE_USER);
101482 +// c_register_addr_space("__iomem", ADDR_SPACE_IOMEM);
101483 +// c_register_addr_space("__force_iomem", ADDR_SPACE_FORCE_IOMEM);
101484 +// c_register_addr_space("__percpu", ADDR_SPACE_PERCPU);
101485 +// c_register_addr_space("__force_percpu", ADDR_SPACE_FORCE_PERCPU);
101486 +// c_register_addr_space("__rcu", ADDR_SPACE_RCU);
101487 +// c_register_addr_space("__force_rcu", ADDR_SPACE_FORCE_RCU);
101488 +
101489 + targetm.addr_space.pointer_mode = checker_addr_space_pointer_mode;
101490 + targetm.addr_space.address_mode = checker_addr_space_address_mode;
101491 + targetm.addr_space.valid_pointer_mode = checker_addr_space_valid_pointer_mode;
101492 + targetm.addr_space.legitimate_address_p = checker_addr_space_legitimate_address_p;
101493 +// targetm.addr_space.legitimize_address = checker_addr_space_legitimize_address;
101494 + targetm.addr_space.subset_p = checker_addr_space_subset_p;
101495 + targetm.addr_space.convert = checker_addr_space_convert;
101496 +}
101497 +
101498 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
101499 +{
101500 + const char * const plugin_name = plugin_info->base_name;
101501 + const int argc = plugin_info->argc;
101502 + const struct plugin_argument * const argv = plugin_info->argv;
101503 + int i;
101504 +
101505 + if (!plugin_default_version_check(version, &gcc_version)) {
101506 + error(G_("incompatible gcc/plugin versions"));
101507 + return 1;
101508 + }
101509 +
101510 + register_callback(plugin_name, PLUGIN_INFO, NULL, &checker_plugin_info);
101511 +
101512 + for (i = 0; i < argc; ++i)
101513 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
101514 +
101515 + if (TARGET_64BIT == 0)
101516 + return 0;
101517 +
101518 + register_callback(plugin_name, PLUGIN_PRAGMAS, register_checker_address_spaces, NULL);
101519 +
101520 + return 0;
101521 +}
101522 diff --git a/tools/gcc/colorize_plugin.c b/tools/gcc/colorize_plugin.c
101523 new file mode 100644
101524 index 0000000..414fe5e
101525 --- /dev/null
101526 +++ b/tools/gcc/colorize_plugin.c
101527 @@ -0,0 +1,151 @@
101528 +/*
101529 + * Copyright 2012-2013 by PaX Team <pageexec@freemail.hu>
101530 + * Licensed under the GPL v2
101531 + *
101532 + * Note: the choice of the license means that the compilation process is
101533 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
101534 + * but for the kernel it doesn't matter since it doesn't link against
101535 + * any of the gcc libraries
101536 + *
101537 + * gcc plugin to colorize diagnostic output
101538 + *
101539 + */
101540 +
101541 +#include "gcc-plugin.h"
101542 +#include "config.h"
101543 +#include "system.h"
101544 +#include "coretypes.h"
101545 +#include "tree.h"
101546 +#include "tree-pass.h"
101547 +#include "flags.h"
101548 +#include "intl.h"
101549 +#include "toplev.h"
101550 +#include "plugin.h"
101551 +#include "diagnostic.h"
101552 +#include "plugin-version.h"
101553 +#include "tm.h"
101554 +
101555 +int plugin_is_GPL_compatible;
101556 +
101557 +static struct plugin_info colorize_plugin_info = {
101558 + .version = "201302112000",
101559 + .help = NULL,
101560 +};
101561 +
101562 +#define GREEN "\033[32m\033[2m"
101563 +#define LIGHTGREEN "\033[32m\033[1m"
101564 +#define YELLOW "\033[33m\033[2m"
101565 +#define LIGHTYELLOW "\033[33m\033[1m"
101566 +#define RED "\033[31m\033[2m"
101567 +#define LIGHTRED "\033[31m\033[1m"
101568 +#define BLUE "\033[34m\033[2m"
101569 +#define LIGHTBLUE "\033[34m\033[1m"
101570 +#define BRIGHT "\033[m\033[1m"
101571 +#define NORMAL "\033[m"
101572 +
101573 +static diagnostic_starter_fn old_starter;
101574 +static diagnostic_finalizer_fn old_finalizer;
101575 +
101576 +static void start_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
101577 +{
101578 + const char *color;
101579 + char *newprefix;
101580 +
101581 + switch (diagnostic->kind) {
101582 + case DK_NOTE:
101583 + color = LIGHTBLUE;
101584 + break;
101585 +
101586 + case DK_PEDWARN:
101587 + case DK_WARNING:
101588 + color = LIGHTYELLOW;
101589 + break;
101590 +
101591 + case DK_ERROR:
101592 + case DK_FATAL:
101593 + case DK_ICE:
101594 + case DK_PERMERROR:
101595 + case DK_SORRY:
101596 + color = LIGHTRED;
101597 + break;
101598 +
101599 + default:
101600 + color = NORMAL;
101601 + }
101602 +
101603 + old_starter(context, diagnostic);
101604 + if (-1 == asprintf(&newprefix, "%s%s" NORMAL, color, context->printer->prefix))
101605 + return;
101606 + pp_destroy_prefix(context->printer);
101607 + pp_set_prefix(context->printer, newprefix);
101608 +}
101609 +
101610 +static void finalize_colorize(diagnostic_context *context, diagnostic_info *diagnostic)
101611 +{
101612 + old_finalizer(context, diagnostic);
101613 +}
101614 +
101615 +static void colorize_arm(void)
101616 +{
101617 + old_starter = diagnostic_starter(global_dc);
101618 + old_finalizer = diagnostic_finalizer(global_dc);
101619 +
101620 + diagnostic_starter(global_dc) = start_colorize;
101621 + diagnostic_finalizer(global_dc) = finalize_colorize;
101622 +}
101623 +
101624 +static unsigned int execute_colorize_rearm(void)
101625 +{
101626 + if (diagnostic_starter(global_dc) == start_colorize)
101627 + return 0;
101628 +
101629 + colorize_arm();
101630 + return 0;
101631 +}
101632 +
101633 +struct simple_ipa_opt_pass pass_ipa_colorize_rearm = {
101634 + .pass = {
101635 + .type = SIMPLE_IPA_PASS,
101636 + .name = "colorize_rearm",
101637 +#if BUILDING_GCC_VERSION >= 4008
101638 + .optinfo_flags = OPTGROUP_NONE,
101639 +#endif
101640 + .gate = NULL,
101641 + .execute = execute_colorize_rearm,
101642 + .sub = NULL,
101643 + .next = NULL,
101644 + .static_pass_number = 0,
101645 + .tv_id = TV_NONE,
101646 + .properties_required = 0,
101647 + .properties_provided = 0,
101648 + .properties_destroyed = 0,
101649 + .todo_flags_start = 0,
101650 + .todo_flags_finish = 0
101651 + }
101652 +};
101653 +
101654 +static void colorize_start_unit(void *gcc_data, void *user_data)
101655 +{
101656 + colorize_arm();
101657 +}
101658 +
101659 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
101660 +{
101661 + const char * const plugin_name = plugin_info->base_name;
101662 + struct register_pass_info colorize_rearm_pass_info = {
101663 + .pass = &pass_ipa_colorize_rearm.pass,
101664 + .reference_pass_name = "*free_lang_data",
101665 + .ref_pass_instance_number = 1,
101666 + .pos_op = PASS_POS_INSERT_AFTER
101667 + };
101668 +
101669 + if (!plugin_default_version_check(version, &gcc_version)) {
101670 + error(G_("incompatible gcc/plugin versions"));
101671 + return 1;
101672 + }
101673 +
101674 + register_callback(plugin_name, PLUGIN_INFO, NULL, &colorize_plugin_info);
101675 + register_callback(plugin_name, PLUGIN_START_UNIT, &colorize_start_unit, NULL);
101676 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &colorize_rearm_pass_info);
101677 + return 0;
101678 +}
101679 diff --git a/tools/gcc/constify_plugin.c b/tools/gcc/constify_plugin.c
101680 new file mode 100644
101681 index 0000000..ba59e50
101682 --- /dev/null
101683 +++ b/tools/gcc/constify_plugin.c
101684 @@ -0,0 +1,558 @@
101685 +/*
101686 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
101687 + * Copyright 2011-2013 by PaX Team <pageexec@freemail.hu>
101688 + * Licensed under the GPL v2, or (at your option) v3
101689 + *
101690 + * This gcc plugin constifies all structures which contain only function pointers or are explicitly marked for constification.
101691 + *
101692 + * Homepage:
101693 + * http://www.grsecurity.net/~ephox/const_plugin/
101694 + *
101695 + * Usage:
101696 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
101697 + * $ gcc -fplugin=constify_plugin.so test.c -O2
101698 + */
101699 +
101700 +#include "gcc-plugin.h"
101701 +#include "config.h"
101702 +#include "system.h"
101703 +#include "coretypes.h"
101704 +#include "tree.h"
101705 +#include "tree-pass.h"
101706 +#include "flags.h"
101707 +#include "intl.h"
101708 +#include "toplev.h"
101709 +#include "plugin.h"
101710 +#include "diagnostic.h"
101711 +#include "plugin-version.h"
101712 +#include "tm.h"
101713 +#include "function.h"
101714 +#include "basic-block.h"
101715 +#include "gimple.h"
101716 +#include "rtl.h"
101717 +#include "emit-rtl.h"
101718 +#include "tree-flow.h"
101719 +#include "target.h"
101720 +#include "langhooks.h"
101721 +
101722 +// should come from c-tree.h if only it were installed for gcc 4.5...
101723 +#define C_TYPE_FIELDS_READONLY(TYPE) TREE_LANG_FLAG_1(TYPE)
101724 +
101725 +// unused type flag in all versions 4.5-4.8
101726 +#define TYPE_CONSTIFY_VISITED(TYPE) TYPE_LANG_FLAG_4(TYPE)
101727 +
101728 +int plugin_is_GPL_compatible;
101729 +
101730 +static struct plugin_info const_plugin_info = {
101731 + .version = "201312032345",
101732 + .help = "no-constify\tturn off constification\n",
101733 +};
101734 +
101735 +typedef struct {
101736 + bool has_fptr_field;
101737 + bool has_writable_field;
101738 + bool has_do_const_field;
101739 + bool has_no_const_field;
101740 +} constify_info;
101741 +
101742 +static const_tree get_field_type(const_tree field)
101743 +{
101744 + return strip_array_types(TREE_TYPE(field));
101745 +}
101746 +
101747 +static bool is_fptr(const_tree field)
101748 +{
101749 + const_tree ptr = get_field_type(field);
101750 +
101751 + if (TREE_CODE(ptr) != POINTER_TYPE)
101752 + return false;
101753 +
101754 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
101755 +}
101756 +
101757 +/*
101758 + * determine whether the given structure type meets the requirements for automatic constification,
101759 + * including the constification attributes on nested structure types
101760 + */
101761 +static void constifiable(const_tree node, constify_info *cinfo)
101762 +{
101763 + const_tree field;
101764 +
101765 + gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
101766 +
101767 + // e.g., pointer to structure fields while still constructing the structure type
101768 + if (TYPE_FIELDS(node) == NULL_TREE)
101769 + return;
101770 +
101771 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
101772 + const_tree type = get_field_type(field);
101773 + enum tree_code code = TREE_CODE(type);
101774 +
101775 + if (node == type)
101776 + continue;
101777 +
101778 + if (is_fptr(field))
101779 + cinfo->has_fptr_field = true;
101780 + else if (!TREE_READONLY(field))
101781 + cinfo->has_writable_field = true;
101782 +
101783 + if (code == RECORD_TYPE || code == UNION_TYPE) {
101784 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
101785 + cinfo->has_do_const_field = true;
101786 + else if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type)))
101787 + cinfo->has_no_const_field = true;
101788 + else
101789 + constifiable(type, cinfo);
101790 + }
101791 + }
101792 +}
101793 +
101794 +static bool constified(const_tree node)
101795 +{
101796 + constify_info cinfo = {
101797 + .has_fptr_field = false,
101798 + .has_writable_field = false,
101799 + .has_do_const_field = false,
101800 + .has_no_const_field = false
101801 + };
101802 +
101803 + gcc_assert(TREE_CODE(node) == RECORD_TYPE || TREE_CODE(node) == UNION_TYPE);
101804 +
101805 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node))) {
101806 + gcc_assert(!TYPE_READONLY(node));
101807 + return false;
101808 + }
101809 +
101810 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(node))) {
101811 + gcc_assert(TYPE_READONLY(node));
101812 + return true;
101813 + }
101814 +
101815 + constifiable(node, &cinfo);
101816 + if ((!cinfo.has_fptr_field || cinfo.has_writable_field) && !cinfo.has_do_const_field)
101817 + return false;
101818 +
101819 + return TYPE_READONLY(node);
101820 +}
101821 +
101822 +static void deconstify_tree(tree node);
101823 +
101824 +static void deconstify_type(tree type)
101825 +{
101826 + tree field;
101827 +
101828 + gcc_assert(TREE_CODE(type) == RECORD_TYPE || TREE_CODE(type) == UNION_TYPE);
101829 +
101830 + for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
101831 + const_tree fieldtype = get_field_type(field);
101832 +
101833 + // special case handling of simple ptr-to-same-array-type members
101834 + if (TREE_CODE(TREE_TYPE(field)) == POINTER_TYPE) {
101835 + const_tree ptrtype = TREE_TYPE(TREE_TYPE(field));
101836 +
101837 + if (TREE_CODE(ptrtype) != RECORD_TYPE && TREE_CODE(ptrtype) != UNION_TYPE)
101838 + continue;
101839 + if (TREE_TYPE(TREE_TYPE(field)) == type)
101840 + continue;
101841 + if (TYPE_MAIN_VARIANT(ptrtype) == TYPE_MAIN_VARIANT(type)) {
101842 + TREE_TYPE(field) = copy_node(TREE_TYPE(field));
101843 + TREE_TYPE(TREE_TYPE(field)) = type;
101844 + }
101845 + continue;
101846 + }
101847 + if (TREE_CODE(fieldtype) != RECORD_TYPE && TREE_CODE(fieldtype) != UNION_TYPE)
101848 + continue;
101849 + if (!constified(fieldtype))
101850 + continue;
101851 +
101852 + deconstify_tree(field);
101853 + TREE_READONLY(field) = 0;
101854 + }
101855 + TYPE_READONLY(type) = 0;
101856 + C_TYPE_FIELDS_READONLY(type) = 0;
101857 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
101858 + TYPE_ATTRIBUTES(type) = remove_attribute("do_const", TYPE_ATTRIBUTES(type));
101859 +}
101860 +
101861 +static void deconstify_tree(tree node)
101862 +{
101863 + tree old_type, new_type, field;
101864 +
101865 + old_type = TREE_TYPE(node);
101866 + while (TREE_CODE(old_type) == ARRAY_TYPE && TREE_CODE(TREE_TYPE(old_type)) != ARRAY_TYPE) {
101867 + node = TREE_TYPE(node) = copy_node(old_type);
101868 + old_type = TREE_TYPE(old_type);
101869 + }
101870 +
101871 + gcc_assert(TREE_CODE(old_type) == RECORD_TYPE || TREE_CODE(old_type) == UNION_TYPE);
101872 + gcc_assert(TYPE_READONLY(old_type) && (TYPE_QUALS(old_type) & TYPE_QUAL_CONST));
101873 +
101874 + new_type = build_qualified_type(old_type, TYPE_QUALS(old_type) & ~TYPE_QUAL_CONST);
101875 + TYPE_FIELDS(new_type) = copy_list(TYPE_FIELDS(new_type));
101876 + for (field = TYPE_FIELDS(new_type); field; field = TREE_CHAIN(field))
101877 + DECL_FIELD_CONTEXT(field) = new_type;
101878 +
101879 + deconstify_type(new_type);
101880 +
101881 + TREE_TYPE(node) = new_type;
101882 +}
101883 +
101884 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
101885 +{
101886 + tree type;
101887 + constify_info cinfo = {
101888 + .has_fptr_field = false,
101889 + .has_writable_field = false,
101890 + .has_do_const_field = false,
101891 + .has_no_const_field = false
101892 + };
101893 +
101894 + *no_add_attrs = true;
101895 + if (TREE_CODE(*node) == FUNCTION_DECL) {
101896 + error("%qE attribute does not apply to functions (%qF)", name, *node);
101897 + return NULL_TREE;
101898 + }
101899 +
101900 + if (TREE_CODE(*node) == PARM_DECL) {
101901 + error("%qE attribute does not apply to function parameters (%qD)", name, *node);
101902 + return NULL_TREE;
101903 + }
101904 +
101905 + if (TREE_CODE(*node) == VAR_DECL) {
101906 + error("%qE attribute does not apply to variables (%qD)", name, *node);
101907 + return NULL_TREE;
101908 + }
101909 +
101910 + if (TYPE_P(*node)) {
101911 + *no_add_attrs = false;
101912 + type = *node;
101913 + } else {
101914 + gcc_assert(TREE_CODE(*node) == TYPE_DECL);
101915 + type = TREE_TYPE(*node);
101916 + }
101917 +
101918 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE) {
101919 + error("%qE attribute used on %qT applies to struct and union types only", name, type);
101920 + return NULL_TREE;
101921 + }
101922 +
101923 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(type))) {
101924 + error("%qE attribute is already applied to the type %qT", name, type);
101925 + return NULL_TREE;
101926 + }
101927 +
101928 + if (TYPE_P(*node)) {
101929 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type)))
101930 + error("%qE attribute used on type %qT is incompatible with 'do_const'", name, type);
101931 + return NULL_TREE;
101932 + }
101933 +
101934 + constifiable(type, &cinfo);
101935 + if ((cinfo.has_fptr_field && !cinfo.has_writable_field) || lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
101936 + deconstify_tree(*node);
101937 + TYPE_CONSTIFY_VISITED(TREE_TYPE(*node)) = 1;
101938 + return NULL_TREE;
101939 + }
101940 +
101941 + error("%qE attribute used on type %qT that is not constified", name, type);
101942 + return NULL_TREE;
101943 +}
101944 +
101945 +static void constify_type(tree type)
101946 +{
101947 + TYPE_READONLY(type) = 1;
101948 + C_TYPE_FIELDS_READONLY(type) = 1;
101949 + TYPE_CONSTIFY_VISITED(type) = 1;
101950 +// TYPE_ATTRIBUTES(type) = tree_cons(get_identifier("do_const"), NULL_TREE, TYPE_ATTRIBUTES(type));
101951 +}
101952 +
101953 +static tree handle_do_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
101954 +{
101955 + *no_add_attrs = true;
101956 + if (!TYPE_P(*node)) {
101957 + error("%qE attribute applies to types only (%qD)", name, *node);
101958 + return NULL_TREE;
101959 + }
101960 +
101961 + if (TREE_CODE(*node) != RECORD_TYPE && TREE_CODE(*node) != UNION_TYPE) {
101962 + error("%qE attribute used on %qT applies to struct and union types only", name, *node);
101963 + return NULL_TREE;
101964 + }
101965 +
101966 + if (lookup_attribute(IDENTIFIER_POINTER(name), TYPE_ATTRIBUTES(*node))) {
101967 + error("%qE attribute used on %qT is already applied to the type", name, *node);
101968 + return NULL_TREE;
101969 + }
101970 +
101971 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(*node))) {
101972 + error("%qE attribute used on %qT is incompatible with 'no_const'", name, *node);
101973 + return NULL_TREE;
101974 + }
101975 +
101976 + *no_add_attrs = false;
101977 + return NULL_TREE;
101978 +}
101979 +
101980 +static struct attribute_spec no_const_attr = {
101981 + .name = "no_const",
101982 + .min_length = 0,
101983 + .max_length = 0,
101984 + .decl_required = false,
101985 + .type_required = false,
101986 + .function_type_required = false,
101987 + .handler = handle_no_const_attribute,
101988 +#if BUILDING_GCC_VERSION >= 4007
101989 + .affects_type_identity = true
101990 +#endif
101991 +};
101992 +
101993 +static struct attribute_spec do_const_attr = {
101994 + .name = "do_const",
101995 + .min_length = 0,
101996 + .max_length = 0,
101997 + .decl_required = false,
101998 + .type_required = false,
101999 + .function_type_required = false,
102000 + .handler = handle_do_const_attribute,
102001 +#if BUILDING_GCC_VERSION >= 4007
102002 + .affects_type_identity = true
102003 +#endif
102004 +};
102005 +
102006 +static void register_attributes(void *event_data, void *data)
102007 +{
102008 + register_attribute(&no_const_attr);
102009 + register_attribute(&do_const_attr);
102010 +}
102011 +
102012 +static void finish_type(void *event_data, void *data)
102013 +{
102014 + tree type = (tree)event_data;
102015 + constify_info cinfo = {
102016 + .has_fptr_field = false,
102017 + .has_writable_field = false,
102018 + .has_do_const_field = false,
102019 + .has_no_const_field = false
102020 + };
102021 +
102022 + if (type == NULL_TREE || type == error_mark_node)
102023 + return;
102024 +
102025 + if (TYPE_FIELDS(type) == NULL_TREE || TYPE_CONSTIFY_VISITED(type))
102026 + return;
102027 +
102028 + constifiable(type, &cinfo);
102029 +
102030 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(type))) {
102031 + if ((cinfo.has_fptr_field && !cinfo.has_writable_field) || cinfo.has_do_const_field) {
102032 + deconstify_type(type);
102033 + TYPE_CONSTIFY_VISITED(type) = 1;
102034 + } else
102035 + error("'no_const' attribute used on type %qT that is not constified", type);
102036 + return;
102037 + }
102038 +
102039 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
102040 + if (!cinfo.has_writable_field) {
102041 + error("'do_const' attribute used on type %qT that is%sconstified", type, cinfo.has_fptr_field ? " " : " not ");
102042 + return;
102043 + }
102044 + constify_type(type);
102045 + return;
102046 + }
102047 +
102048 + if (cinfo.has_fptr_field && !cinfo.has_writable_field) {
102049 + if (lookup_attribute("do_const", TYPE_ATTRIBUTES(type))) {
102050 + error("'do_const' attribute used on type %qT that is constified", type);
102051 + return;
102052 + }
102053 + constify_type(type);
102054 + return;
102055 + }
102056 +
102057 + deconstify_type(type);
102058 + TYPE_CONSTIFY_VISITED(type) = 1;
102059 +}
102060 +
102061 +static void check_global_variables(void)
102062 +{
102063 + struct varpool_node *node;
102064 +
102065 +#if BUILDING_GCC_VERSION <= 4007
102066 + for (node = varpool_nodes; node; node = node->next) {
102067 + tree var = node->decl;
102068 +#else
102069 + FOR_EACH_VARIABLE(node) {
102070 + tree var = node->symbol.decl;
102071 +#endif
102072 + tree type = TREE_TYPE(var);
102073 +
102074 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
102075 + continue;
102076 +
102077 + if (!TYPE_READONLY(type) || !C_TYPE_FIELDS_READONLY(type))
102078 + continue;
102079 +
102080 + if (!TYPE_CONSTIFY_VISITED(type))
102081 + continue;
102082 +
102083 + if (DECL_EXTERNAL(var))
102084 + continue;
102085 +
102086 + if (DECL_INITIAL(var))
102087 + continue;
102088 +
102089 + // this works around a gcc bug/feature where uninitialized globals
102090 + // are moved into the .bss section regardless of any constification
102091 + DECL_INITIAL(var) = build_constructor(type, NULL);
102092 +// inform(DECL_SOURCE_LOCATION(var), "constified variable %qE moved into .rodata", var);
102093 + }
102094 +}
102095 +
102096 +static unsigned int check_local_variables(void)
102097 +{
102098 + unsigned int ret = 0;
102099 + tree var;
102100 +
102101 +#if BUILDING_GCC_VERSION == 4005
102102 + tree vars;
102103 +#else
102104 + unsigned int i;
102105 +#endif
102106 +
102107 +#if BUILDING_GCC_VERSION == 4005
102108 + for (vars = cfun->local_decls; vars; vars = TREE_CHAIN(vars)) {
102109 + var = TREE_VALUE(vars);
102110 +#else
102111 + FOR_EACH_LOCAL_DECL(cfun, i, var) {
102112 +#endif
102113 + tree type = TREE_TYPE(var);
102114 +
102115 + gcc_assert(DECL_P(var));
102116 + if (is_global_var(var))
102117 + continue;
102118 +
102119 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
102120 + continue;
102121 +
102122 + if (!TYPE_READONLY(type) || !C_TYPE_FIELDS_READONLY(type))
102123 + continue;
102124 +
102125 + if (!TYPE_CONSTIFY_VISITED(type))
102126 + continue;
102127 +
102128 + error_at(DECL_SOURCE_LOCATION(var), "constified variable %qE cannot be local", var);
102129 + ret = 1;
102130 + }
102131 + return ret;
102132 +}
102133 +
102134 +static unsigned int check_variables(void)
102135 +{
102136 + check_global_variables();
102137 + return check_local_variables();
102138 +}
102139 +
102140 +static struct gimple_opt_pass pass_local_variable = {
102141 + {
102142 + .type = GIMPLE_PASS,
102143 + .name = "check_variables",
102144 +#if BUILDING_GCC_VERSION >= 4008
102145 + .optinfo_flags = OPTGROUP_NONE,
102146 +#endif
102147 + .gate = NULL,
102148 + .execute = check_variables,
102149 + .sub = NULL,
102150 + .next = NULL,
102151 + .static_pass_number = 0,
102152 + .tv_id = TV_NONE,
102153 + .properties_required = 0,
102154 + .properties_provided = 0,
102155 + .properties_destroyed = 0,
102156 + .todo_flags_start = 0,
102157 + .todo_flags_finish = 0
102158 + }
102159 +};
102160 +
102161 +static struct {
102162 + const char *name;
102163 + const char *asm_op;
102164 +} sections[] = {
102165 + {".init.rodata", "\t.section\t.init.rodata,\"a\""},
102166 + {".ref.rodata", "\t.section\t.ref.rodata,\"a\""},
102167 + {".devinit.rodata", "\t.section\t.devinit.rodata,\"a\""},
102168 + {".devexit.rodata", "\t.section\t.devexit.rodata,\"a\""},
102169 + {".cpuinit.rodata", "\t.section\t.cpuinit.rodata,\"a\""},
102170 + {".cpuexit.rodata", "\t.section\t.cpuexit.rodata,\"a\""},
102171 + {".meminit.rodata", "\t.section\t.meminit.rodata,\"a\""},
102172 + {".memexit.rodata", "\t.section\t.memexit.rodata,\"a\""},
102173 + {".data..read_only", "\t.section\t.data..read_only,\"a\""},
102174 +};
102175 +
102176 +static unsigned int (*old_section_type_flags)(tree decl, const char *name, int reloc);
102177 +
102178 +static unsigned int constify_section_type_flags(tree decl, const char *name, int reloc)
102179 +{
102180 + size_t i;
102181 +
102182 + for (i = 0; i < ARRAY_SIZE(sections); i++)
102183 + if (!strcmp(sections[i].name, name))
102184 + return 0;
102185 + return old_section_type_flags(decl, name, reloc);
102186 +}
102187 +
102188 +static void constify_start_unit(void *gcc_data, void *user_data)
102189 +{
102190 +// size_t i;
102191 +
102192 +// for (i = 0; i < ARRAY_SIZE(sections); i++)
102193 +// sections[i].section = get_unnamed_section(0, output_section_asm_op, sections[i].asm_op);
102194 +// sections[i].section = get_section(sections[i].name, 0, NULL);
102195 +
102196 + old_section_type_flags = targetm.section_type_flags;
102197 + targetm.section_type_flags = constify_section_type_flags;
102198 +}
102199 +
102200 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
102201 +{
102202 + const char * const plugin_name = plugin_info->base_name;
102203 + const int argc = plugin_info->argc;
102204 + const struct plugin_argument * const argv = plugin_info->argv;
102205 + int i;
102206 + bool constify = true;
102207 +
102208 + struct register_pass_info local_variable_pass_info = {
102209 + .pass = &pass_local_variable.pass,
102210 + .reference_pass_name = "ssa",
102211 + .ref_pass_instance_number = 1,
102212 + .pos_op = PASS_POS_INSERT_BEFORE
102213 + };
102214 +
102215 + if (!plugin_default_version_check(version, &gcc_version)) {
102216 + error(G_("incompatible gcc/plugin versions"));
102217 + return 1;
102218 + }
102219 +
102220 + for (i = 0; i < argc; ++i) {
102221 + if (!(strcmp(argv[i].key, "no-constify"))) {
102222 + constify = false;
102223 + continue;
102224 + }
102225 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
102226 + }
102227 +
102228 + if (strcmp(lang_hooks.name, "GNU C")) {
102229 + inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name);
102230 + constify = false;
102231 + }
102232 +
102233 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
102234 + if (constify) {
102235 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
102236 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &local_variable_pass_info);
102237 + register_callback(plugin_name, PLUGIN_START_UNIT, constify_start_unit, NULL);
102238 + }
102239 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
102240 +
102241 + return 0;
102242 +}
102243 diff --git a/tools/gcc/generate_size_overflow_hash.sh b/tools/gcc/generate_size_overflow_hash.sh
102244 new file mode 100644
102245 index 0000000..e518932
102246 --- /dev/null
102247 +++ b/tools/gcc/generate_size_overflow_hash.sh
102248 @@ -0,0 +1,94 @@
102249 +#!/bin/bash
102250 +
102251 +# This script generates the hash table (size_overflow_hash.h) for the size_overflow gcc plugin (size_overflow_plugin.c).
102252 +
102253 +header1="size_overflow_hash.h"
102254 +database="size_overflow_hash.data"
102255 +n=65536
102256 +
102257 +usage() {
102258 +cat <<EOF
102259 +usage: $0 options
102260 +OPTIONS:
102261 + -h|--help help
102262 + -o header file
102263 + -d database file
102264 + -n hash array size
102265 +EOF
102266 + return 0
102267 +}
102268 +
102269 +while true
102270 +do
102271 + case "$1" in
102272 + -h|--help) usage && exit 0;;
102273 + -n) n=$2; shift 2;;
102274 + -o) header1="$2"; shift 2;;
102275 + -d) database="$2"; shift 2;;
102276 + --) shift 1; break ;;
102277 + *) break ;;
102278 + esac
102279 +done
102280 +
102281 +create_defines() {
102282 + for i in `seq 0 31`
102283 + do
102284 + echo -e "#define PARAM"$i" (1U << "$i")" >> "$header1"
102285 + done
102286 + echo >> "$header1"
102287 +}
102288 +
102289 +create_structs() {
102290 + rm -f "$header1"
102291 +
102292 + create_defines
102293 +
102294 + cat "$database" | while read data
102295 + do
102296 + data_array=($data)
102297 + struct_hash_name="${data_array[0]}"
102298 + funcn="${data_array[1]}"
102299 + params="${data_array[2]}"
102300 + next="${data_array[4]}"
102301 +
102302 + echo "const struct size_overflow_hash $struct_hash_name = {" >> "$header1"
102303 +
102304 + echo -e "\t.next\t= $next,\n\t.name\t= \"$funcn\"," >> "$header1"
102305 + echo -en "\t.param\t= " >> "$header1"
102306 + line=
102307 + for param_num in ${params//-/ };
102308 + do
102309 + line="${line}PARAM"$param_num"|"
102310 + done
102311 +
102312 + echo -e "${line%?},\n};\n" >> "$header1"
102313 + done
102314 +}
102315 +
102316 +create_headers() {
102317 + echo "const struct size_overflow_hash * const size_overflow_hash[$n] = {" >> "$header1"
102318 +}
102319 +
102320 +create_array_elements() {
102321 + index=0
102322 + grep -v "nohasharray" $database | sort -n -k 4 | while read data
102323 + do
102324 + data_array=($data)
102325 + i="${data_array[3]}"
102326 + hash="${data_array[0]}"
102327 + while [[ $index -lt $i ]]
102328 + do
102329 + echo -e "\t["$index"]\t= NULL," >> "$header1"
102330 + index=$(($index + 1))
102331 + done
102332 + index=$(($index + 1))
102333 + echo -e "\t["$i"]\t= &"$hash"," >> "$header1"
102334 + done
102335 + echo '};' >> $header1
102336 +}
102337 +
102338 +create_structs
102339 +create_headers
102340 +create_array_elements
102341 +
102342 +exit 0
102343 diff --git a/tools/gcc/kallocstat_plugin.c b/tools/gcc/kallocstat_plugin.c
102344 new file mode 100644
102345 index 0000000..568b360
102346 --- /dev/null
102347 +++ b/tools/gcc/kallocstat_plugin.c
102348 @@ -0,0 +1,170 @@
102349 +/*
102350 + * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
102351 + * Licensed under the GPL v2
102352 + *
102353 + * Note: the choice of the license means that the compilation process is
102354 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
102355 + * but for the kernel it doesn't matter since it doesn't link against
102356 + * any of the gcc libraries
102357 + *
102358 + * gcc plugin to find the distribution of k*alloc sizes
102359 + *
102360 + * TODO:
102361 + *
102362 + * BUGS:
102363 + * - none known
102364 + */
102365 +#include "gcc-plugin.h"
102366 +#include "config.h"
102367 +#include "system.h"
102368 +#include "coretypes.h"
102369 +#include "tree.h"
102370 +#include "tree-pass.h"
102371 +#include "flags.h"
102372 +#include "intl.h"
102373 +#include "toplev.h"
102374 +#include "plugin.h"
102375 +//#include "expr.h" where are you...
102376 +#include "diagnostic.h"
102377 +#include "plugin-version.h"
102378 +#include "tm.h"
102379 +#include "function.h"
102380 +#include "basic-block.h"
102381 +#include "gimple.h"
102382 +#include "rtl.h"
102383 +#include "emit-rtl.h"
102384 +
102385 +extern void print_gimple_stmt(FILE *, gimple, int, int);
102386 +
102387 +int plugin_is_GPL_compatible;
102388 +
102389 +static const char * const kalloc_functions[] = {
102390 + "__kmalloc",
102391 + "kmalloc",
102392 + "kmalloc_large",
102393 + "kmalloc_node",
102394 + "kmalloc_order",
102395 + "kmalloc_order_trace",
102396 + "kmalloc_slab",
102397 + "kzalloc",
102398 + "kzalloc_node",
102399 +};
102400 +
102401 +static struct plugin_info kallocstat_plugin_info = {
102402 + .version = "201302112000",
102403 +};
102404 +
102405 +static unsigned int execute_kallocstat(void);
102406 +
102407 +static struct gimple_opt_pass kallocstat_pass = {
102408 + .pass = {
102409 + .type = GIMPLE_PASS,
102410 + .name = "kallocstat",
102411 +#if BUILDING_GCC_VERSION >= 4008
102412 + .optinfo_flags = OPTGROUP_NONE,
102413 +#endif
102414 + .gate = NULL,
102415 + .execute = execute_kallocstat,
102416 + .sub = NULL,
102417 + .next = NULL,
102418 + .static_pass_number = 0,
102419 + .tv_id = TV_NONE,
102420 + .properties_required = 0,
102421 + .properties_provided = 0,
102422 + .properties_destroyed = 0,
102423 + .todo_flags_start = 0,
102424 + .todo_flags_finish = 0
102425 + }
102426 +};
102427 +
102428 +static bool is_kalloc(const char *fnname)
102429 +{
102430 + size_t i;
102431 +
102432 + for (i = 0; i < ARRAY_SIZE(kalloc_functions); i++)
102433 + if (!strcmp(fnname, kalloc_functions[i]))
102434 + return true;
102435 + return false;
102436 +}
102437 +
102438 +static unsigned int execute_kallocstat(void)
102439 +{
102440 + basic_block bb;
102441 +
102442 + // 1. loop through BBs and GIMPLE statements
102443 + FOR_EACH_BB(bb) {
102444 + gimple_stmt_iterator gsi;
102445 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
102446 + // gimple match:
102447 + tree fndecl, size;
102448 + gimple call_stmt;
102449 + const char *fnname;
102450 +
102451 + // is it a call
102452 + call_stmt = gsi_stmt(gsi);
102453 + if (!is_gimple_call(call_stmt))
102454 + continue;
102455 + fndecl = gimple_call_fndecl(call_stmt);
102456 + if (fndecl == NULL_TREE)
102457 + continue;
102458 + if (TREE_CODE(fndecl) != FUNCTION_DECL)
102459 + continue;
102460 +
102461 + // is it a call to k*alloc
102462 + fnname = IDENTIFIER_POINTER(DECL_NAME(fndecl));
102463 + if (!is_kalloc(fnname))
102464 + continue;
102465 +
102466 + // is the size arg the result of a simple const assignment
102467 + size = gimple_call_arg(call_stmt, 0);
102468 + while (true) {
102469 + gimple def_stmt;
102470 + expanded_location xloc;
102471 + size_t size_val;
102472 +
102473 + if (TREE_CODE(size) != SSA_NAME)
102474 + break;
102475 + def_stmt = SSA_NAME_DEF_STMT(size);
102476 + if (!def_stmt || !is_gimple_assign(def_stmt))
102477 + break;
102478 + if (gimple_num_ops(def_stmt) != 2)
102479 + break;
102480 + size = gimple_assign_rhs1(def_stmt);
102481 + if (!TREE_CONSTANT(size))
102482 + continue;
102483 + xloc = expand_location(gimple_location(def_stmt));
102484 + if (!xloc.file)
102485 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
102486 + size_val = TREE_INT_CST_LOW(size);
102487 + fprintf(stderr, "kallocsize: %8zu %8zx %s %s:%u\n", size_val, size_val, fnname, xloc.file, xloc.line);
102488 + break;
102489 + }
102490 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
102491 +//debug_tree(gimple_call_fn(call_stmt));
102492 +//print_node(stderr, "pax", fndecl, 4);
102493 + }
102494 + }
102495 +
102496 + return 0;
102497 +}
102498 +
102499 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
102500 +{
102501 + const char * const plugin_name = plugin_info->base_name;
102502 + struct register_pass_info kallocstat_pass_info = {
102503 + .pass = &kallocstat_pass.pass,
102504 + .reference_pass_name = "ssa",
102505 + .ref_pass_instance_number = 1,
102506 + .pos_op = PASS_POS_INSERT_AFTER
102507 + };
102508 +
102509 + if (!plugin_default_version_check(version, &gcc_version)) {
102510 + error(G_("incompatible gcc/plugin versions"));
102511 + return 1;
102512 + }
102513 +
102514 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kallocstat_plugin_info);
102515 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kallocstat_pass_info);
102516 +
102517 + return 0;
102518 +}
102519 diff --git a/tools/gcc/kernexec_plugin.c b/tools/gcc/kernexec_plugin.c
102520 new file mode 100644
102521 index 0000000..a25306b
102522 --- /dev/null
102523 +++ b/tools/gcc/kernexec_plugin.c
102524 @@ -0,0 +1,474 @@
102525 +/*
102526 + * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
102527 + * Licensed under the GPL v2
102528 + *
102529 + * Note: the choice of the license means that the compilation process is
102530 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
102531 + * but for the kernel it doesn't matter since it doesn't link against
102532 + * any of the gcc libraries
102533 + *
102534 + * gcc plugin to make KERNEXEC/amd64 almost as good as it is on i386
102535 + *
102536 + * TODO:
102537 + *
102538 + * BUGS:
102539 + * - none known
102540 + */
102541 +#include "gcc-plugin.h"
102542 +#include "config.h"
102543 +#include "system.h"
102544 +#include "coretypes.h"
102545 +#include "tree.h"
102546 +#include "tree-pass.h"
102547 +#include "flags.h"
102548 +#include "intl.h"
102549 +#include "toplev.h"
102550 +#include "plugin.h"
102551 +//#include "expr.h" where are you...
102552 +#include "diagnostic.h"
102553 +#include "plugin-version.h"
102554 +#include "tm.h"
102555 +#include "function.h"
102556 +#include "basic-block.h"
102557 +#include "gimple.h"
102558 +#include "rtl.h"
102559 +#include "emit-rtl.h"
102560 +#include "tree-flow.h"
102561 +
102562 +extern void print_gimple_stmt(FILE *, gimple, int, int);
102563 +extern rtx emit_move_insn(rtx x, rtx y);
102564 +
102565 +#if BUILDING_GCC_VERSION <= 4006
102566 +#define ANY_RETURN_P(rtx) (GET_CODE(rtx) == RETURN)
102567 +#endif
102568 +
102569 +#if BUILDING_GCC_VERSION >= 4008
102570 +#define TODO_dump_func 0
102571 +#endif
102572 +
102573 +int plugin_is_GPL_compatible;
102574 +
102575 +static struct plugin_info kernexec_plugin_info = {
102576 + .version = "201308230150",
102577 + .help = "method=[bts|or]\tinstrumentation method\n"
102578 +};
102579 +
102580 +static unsigned int execute_kernexec_reload(void);
102581 +static unsigned int execute_kernexec_fptr(void);
102582 +static unsigned int execute_kernexec_retaddr(void);
102583 +static bool kernexec_cmodel_check(void);
102584 +
102585 +static void (*kernexec_instrument_fptr)(gimple_stmt_iterator *);
102586 +static void (*kernexec_instrument_retaddr)(rtx);
102587 +
102588 +static struct gimple_opt_pass kernexec_reload_pass = {
102589 + .pass = {
102590 + .type = GIMPLE_PASS,
102591 + .name = "kernexec_reload",
102592 +#if BUILDING_GCC_VERSION >= 4008
102593 + .optinfo_flags = OPTGROUP_NONE,
102594 +#endif
102595 + .gate = kernexec_cmodel_check,
102596 + .execute = execute_kernexec_reload,
102597 + .sub = NULL,
102598 + .next = NULL,
102599 + .static_pass_number = 0,
102600 + .tv_id = TV_NONE,
102601 + .properties_required = 0,
102602 + .properties_provided = 0,
102603 + .properties_destroyed = 0,
102604 + .todo_flags_start = 0,
102605 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
102606 + }
102607 +};
102608 +
102609 +static struct gimple_opt_pass kernexec_fptr_pass = {
102610 + .pass = {
102611 + .type = GIMPLE_PASS,
102612 + .name = "kernexec_fptr",
102613 +#if BUILDING_GCC_VERSION >= 4008
102614 + .optinfo_flags = OPTGROUP_NONE,
102615 +#endif
102616 + .gate = kernexec_cmodel_check,
102617 + .execute = execute_kernexec_fptr,
102618 + .sub = NULL,
102619 + .next = NULL,
102620 + .static_pass_number = 0,
102621 + .tv_id = TV_NONE,
102622 + .properties_required = 0,
102623 + .properties_provided = 0,
102624 + .properties_destroyed = 0,
102625 + .todo_flags_start = 0,
102626 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa_no_phi
102627 + }
102628 +};
102629 +
102630 +static struct rtl_opt_pass kernexec_retaddr_pass = {
102631 + .pass = {
102632 + .type = RTL_PASS,
102633 + .name = "kernexec_retaddr",
102634 +#if BUILDING_GCC_VERSION >= 4008
102635 + .optinfo_flags = OPTGROUP_NONE,
102636 +#endif
102637 + .gate = kernexec_cmodel_check,
102638 + .execute = execute_kernexec_retaddr,
102639 + .sub = NULL,
102640 + .next = NULL,
102641 + .static_pass_number = 0,
102642 + .tv_id = TV_NONE,
102643 + .properties_required = 0,
102644 + .properties_provided = 0,
102645 + .properties_destroyed = 0,
102646 + .todo_flags_start = 0,
102647 + .todo_flags_finish = TODO_dump_func | TODO_ggc_collect
102648 + }
102649 +};
102650 +
102651 +static bool kernexec_cmodel_check(void)
102652 +{
102653 + tree section;
102654 +
102655 + if (ix86_cmodel != CM_KERNEL)
102656 + return false;
102657 +
102658 + section = lookup_attribute("section", DECL_ATTRIBUTES(current_function_decl));
102659 + if (!section || !TREE_VALUE(section))
102660 + return true;
102661 +
102662 + section = TREE_VALUE(TREE_VALUE(section));
102663 + if (strncmp(TREE_STRING_POINTER(section), ".vsyscall_", 10))
102664 + return true;
102665 +
102666 + return false;
102667 +}
102668 +
102669 +/*
102670 + * add special KERNEXEC instrumentation: reload %r12 after it has been clobbered
102671 + */
102672 +static void kernexec_reload_fptr_mask(gimple_stmt_iterator *gsi)
102673 +{
102674 + gimple asm_movabs_stmt;
102675 +
102676 + // build asm volatile("movabs $0x8000000000000000, %%r12\n\t" : : : );
102677 + asm_movabs_stmt = gimple_build_asm_vec("movabs $0x8000000000000000, %%r12\n\t", NULL, NULL, NULL, NULL);
102678 + gimple_asm_set_volatile(asm_movabs_stmt, true);
102679 + gsi_insert_after(gsi, asm_movabs_stmt, GSI_CONTINUE_LINKING);
102680 + update_stmt(asm_movabs_stmt);
102681 +}
102682 +
102683 +/*
102684 + * find all asm() stmts that clobber r12 and add a reload of r12
102685 + */
102686 +static unsigned int execute_kernexec_reload(void)
102687 +{
102688 + basic_block bb;
102689 +
102690 + // 1. loop through BBs and GIMPLE statements
102691 + FOR_EACH_BB(bb) {
102692 + gimple_stmt_iterator gsi;
102693 +
102694 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
102695 + // gimple match: __asm__ ("" : : : "r12");
102696 + gimple asm_stmt;
102697 + size_t nclobbers;
102698 +
102699 + // is it an asm ...
102700 + asm_stmt = gsi_stmt(gsi);
102701 + if (gimple_code(asm_stmt) != GIMPLE_ASM)
102702 + continue;
102703 +
102704 + // ... clobbering r12
102705 + nclobbers = gimple_asm_nclobbers(asm_stmt);
102706 + while (nclobbers--) {
102707 + tree op = gimple_asm_clobber_op(asm_stmt, nclobbers);
102708 + if (strcmp(TREE_STRING_POINTER(TREE_VALUE(op)), "r12"))
102709 + continue;
102710 + kernexec_reload_fptr_mask(&gsi);
102711 +//print_gimple_stmt(stderr, asm_stmt, 0, TDF_LINENO);
102712 + break;
102713 + }
102714 + }
102715 + }
102716 +
102717 + return 0;
102718 +}
102719 +
102720 +/*
102721 + * add special KERNEXEC instrumentation: force MSB of fptr to 1, which will produce
102722 + * a non-canonical address from a userland ptr and will just trigger a GPF on dereference
102723 + */
102724 +static void kernexec_instrument_fptr_bts(gimple_stmt_iterator *gsi)
102725 +{
102726 + gimple assign_intptr, assign_new_fptr, call_stmt;
102727 + tree intptr, orptr, old_fptr, new_fptr, kernexec_mask;
102728 +
102729 + call_stmt = gsi_stmt(*gsi);
102730 + old_fptr = gimple_call_fn(call_stmt);
102731 +
102732 + // create temporary unsigned long variable used for bitops and cast fptr to it
102733 + intptr = create_tmp_var(long_unsigned_type_node, "kernexec_bts");
102734 +#if BUILDING_GCC_VERSION <= 4007
102735 + add_referenced_var(intptr);
102736 +#endif
102737 + intptr = make_ssa_name(intptr, NULL);
102738 + assign_intptr = gimple_build_assign(intptr, fold_convert(long_unsigned_type_node, old_fptr));
102739 + SSA_NAME_DEF_STMT(intptr) = assign_intptr;
102740 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
102741 + update_stmt(assign_intptr);
102742 +
102743 + // apply logical or to temporary unsigned long and bitmask
102744 + kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0x8000000000000000LL);
102745 +// kernexec_mask = build_int_cstu(long_long_unsigned_type_node, 0xffffffff80000000LL);
102746 + orptr = fold_build2(BIT_IOR_EXPR, long_long_unsigned_type_node, intptr, kernexec_mask);
102747 + intptr = make_ssa_name(SSA_NAME_VAR(intptr), NULL);
102748 + assign_intptr = gimple_build_assign(intptr, orptr);
102749 + SSA_NAME_DEF_STMT(intptr) = assign_intptr;
102750 + gsi_insert_before(gsi, assign_intptr, GSI_SAME_STMT);
102751 + update_stmt(assign_intptr);
102752 +
102753 + // cast temporary unsigned long back to a temporary fptr variable
102754 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_fptr");
102755 +#if BUILDING_GCC_VERSION <= 4007
102756 + add_referenced_var(new_fptr);
102757 +#endif
102758 + new_fptr = make_ssa_name(new_fptr, NULL);
102759 + assign_new_fptr = gimple_build_assign(new_fptr, fold_convert(TREE_TYPE(old_fptr), intptr));
102760 + SSA_NAME_DEF_STMT(new_fptr) = assign_new_fptr;
102761 + gsi_insert_before(gsi, assign_new_fptr, GSI_SAME_STMT);
102762 + update_stmt(assign_new_fptr);
102763 +
102764 + // replace call stmt fn with the new fptr
102765 + gimple_call_set_fn(call_stmt, new_fptr);
102766 + update_stmt(call_stmt);
102767 +}
102768 +
102769 +static void kernexec_instrument_fptr_or(gimple_stmt_iterator *gsi)
102770 +{
102771 + gimple asm_or_stmt, call_stmt;
102772 + tree old_fptr, new_fptr, input, output;
102773 +#if BUILDING_GCC_VERSION <= 4007
102774 + VEC(tree, gc) *inputs = NULL;
102775 + VEC(tree, gc) *outputs = NULL;
102776 +#else
102777 + vec<tree, va_gc> *inputs = NULL;
102778 + vec<tree, va_gc> *outputs = NULL;
102779 +#endif
102780 +
102781 + call_stmt = gsi_stmt(*gsi);
102782 + old_fptr = gimple_call_fn(call_stmt);
102783 +
102784 + // create temporary fptr variable
102785 + new_fptr = create_tmp_var(TREE_TYPE(old_fptr), "kernexec_or");
102786 +#if BUILDING_GCC_VERSION <= 4007
102787 + add_referenced_var(new_fptr);
102788 +#endif
102789 + new_fptr = make_ssa_name(new_fptr, NULL);
102790 +
102791 + // build asm volatile("orq %%r12, %0\n\t" : "=r"(new_fptr) : "0"(old_fptr));
102792 + input = build_tree_list(NULL_TREE, build_string(1, "0"));
102793 + input = chainon(NULL_TREE, build_tree_list(input, old_fptr));
102794 + output = build_tree_list(NULL_TREE, build_string(2, "=r"));
102795 + output = chainon(NULL_TREE, build_tree_list(output, new_fptr));
102796 +#if BUILDING_GCC_VERSION <= 4007
102797 + VEC_safe_push(tree, gc, inputs, input);
102798 + VEC_safe_push(tree, gc, outputs, output);
102799 +#else
102800 + vec_safe_push(inputs, input);
102801 + vec_safe_push(outputs, output);
102802 +#endif
102803 + asm_or_stmt = gimple_build_asm_vec("orq %%r12, %0\n\t", inputs, outputs, NULL, NULL);
102804 + SSA_NAME_DEF_STMT(new_fptr) = asm_or_stmt;
102805 + gimple_asm_set_volatile(asm_or_stmt, true);
102806 + gsi_insert_before(gsi, asm_or_stmt, GSI_SAME_STMT);
102807 + update_stmt(asm_or_stmt);
102808 +
102809 + // replace call stmt fn with the new fptr
102810 + gimple_call_set_fn(call_stmt, new_fptr);
102811 + update_stmt(call_stmt);
102812 +}
102813 +
102814 +/*
102815 + * find all C level function pointer dereferences and forcibly set the highest bit of the pointer
102816 + */
102817 +static unsigned int execute_kernexec_fptr(void)
102818 +{
102819 + basic_block bb;
102820 +
102821 + // 1. loop through BBs and GIMPLE statements
102822 + FOR_EACH_BB(bb) {
102823 + gimple_stmt_iterator gsi;
102824 +
102825 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
102826 + // gimple match: h_1 = get_fptr (); D.2709_3 = h_1 (x_2(D));
102827 + tree fn;
102828 + gimple call_stmt;
102829 +
102830 + // is it a call ...
102831 + call_stmt = gsi_stmt(gsi);
102832 + if (!is_gimple_call(call_stmt))
102833 + continue;
102834 + fn = gimple_call_fn(call_stmt);
102835 + if (TREE_CODE(fn) == ADDR_EXPR)
102836 + continue;
102837 + if (TREE_CODE(fn) != SSA_NAME)
102838 + gcc_unreachable();
102839 +
102840 + // ... through a function pointer
102841 + if (SSA_NAME_VAR(fn) != NULL_TREE) {
102842 + fn = SSA_NAME_VAR(fn);
102843 + if (TREE_CODE(fn) != VAR_DECL && TREE_CODE(fn) != PARM_DECL) {
102844 + debug_tree(fn);
102845 + gcc_unreachable();
102846 + }
102847 + }
102848 + fn = TREE_TYPE(fn);
102849 + if (TREE_CODE(fn) != POINTER_TYPE)
102850 + continue;
102851 + fn = TREE_TYPE(fn);
102852 + if (TREE_CODE(fn) != FUNCTION_TYPE)
102853 + continue;
102854 +
102855 + kernexec_instrument_fptr(&gsi);
102856 +
102857 +//debug_tree(gimple_call_fn(call_stmt));
102858 +//print_gimple_stmt(stderr, call_stmt, 0, TDF_LINENO);
102859 + }
102860 + }
102861 +
102862 + return 0;
102863 +}
102864 +
102865 +// add special KERNEXEC instrumentation: btsq $63,(%rsp) just before retn
102866 +static void kernexec_instrument_retaddr_bts(rtx insn)
102867 +{
102868 + rtx btsq;
102869 + rtvec argvec, constraintvec, labelvec;
102870 + int line;
102871 +
102872 + // create asm volatile("btsq $63,(%%rsp)":::)
102873 + argvec = rtvec_alloc(0);
102874 + constraintvec = rtvec_alloc(0);
102875 + labelvec = rtvec_alloc(0);
102876 + line = expand_location(RTL_LOCATION(insn)).line;
102877 + btsq = gen_rtx_ASM_OPERANDS(VOIDmode, "btsq $63,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
102878 + MEM_VOLATILE_P(btsq) = 1;
102879 +// RTX_FRAME_RELATED_P(btsq) = 1; // not for ASM_OPERANDS
102880 + emit_insn_before(btsq, insn);
102881 +}
102882 +
102883 +// add special KERNEXEC instrumentation: orq %r12,(%rsp) just before retn
102884 +static void kernexec_instrument_retaddr_or(rtx insn)
102885 +{
102886 + rtx orq;
102887 + rtvec argvec, constraintvec, labelvec;
102888 + int line;
102889 +
102890 + // create asm volatile("orq %%r12,(%%rsp)":::)
102891 + argvec = rtvec_alloc(0);
102892 + constraintvec = rtvec_alloc(0);
102893 + labelvec = rtvec_alloc(0);
102894 + line = expand_location(RTL_LOCATION(insn)).line;
102895 + orq = gen_rtx_ASM_OPERANDS(VOIDmode, "orq %%r12,(%%rsp)", empty_string, 0, argvec, constraintvec, labelvec, line);
102896 + MEM_VOLATILE_P(orq) = 1;
102897 +// RTX_FRAME_RELATED_P(orq) = 1; // not for ASM_OPERANDS
102898 + emit_insn_before(orq, insn);
102899 +}
102900 +
102901 +/*
102902 + * find all asm level function returns and forcibly set the highest bit of the return address
102903 + */
102904 +static unsigned int execute_kernexec_retaddr(void)
102905 +{
102906 + rtx insn;
102907 +
102908 +// if (stack_realign_drap)
102909 +// inform(DECL_SOURCE_LOCATION(current_function_decl), "drap detected in %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
102910 +
102911 + // 1. find function returns
102912 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
102913 + // rtl match: (jump_insn 41 40 42 2 (return) fptr.c:42 634 {return_internal} (nil))
102914 + // (jump_insn 12 9 11 2 (parallel [ (return) (unspec [ (0) ] UNSPEC_REP) ]) fptr.c:46 635 {return_internal_long} (nil))
102915 + // (jump_insn 97 96 98 6 (simple_return) fptr.c:50 -1 (nil) -> simple_return)
102916 + rtx body;
102917 +
102918 + // is it a retn
102919 + if (!JUMP_P(insn))
102920 + continue;
102921 + body = PATTERN(insn);
102922 + if (GET_CODE(body) == PARALLEL)
102923 + body = XVECEXP(body, 0, 0);
102924 + if (!ANY_RETURN_P(body))
102925 + continue;
102926 + kernexec_instrument_retaddr(insn);
102927 + }
102928 +
102929 +// print_simple_rtl(stderr, get_insns());
102930 +// print_rtl(stderr, get_insns());
102931 +
102932 + return 0;
102933 +}
102934 +
102935 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
102936 +{
102937 + const char * const plugin_name = plugin_info->base_name;
102938 + const int argc = plugin_info->argc;
102939 + const struct plugin_argument * const argv = plugin_info->argv;
102940 + int i;
102941 + struct register_pass_info kernexec_reload_pass_info = {
102942 + .pass = &kernexec_reload_pass.pass,
102943 + .reference_pass_name = "ssa",
102944 + .ref_pass_instance_number = 1,
102945 + .pos_op = PASS_POS_INSERT_AFTER
102946 + };
102947 + struct register_pass_info kernexec_fptr_pass_info = {
102948 + .pass = &kernexec_fptr_pass.pass,
102949 + .reference_pass_name = "ssa",
102950 + .ref_pass_instance_number = 1,
102951 + .pos_op = PASS_POS_INSERT_AFTER
102952 + };
102953 + struct register_pass_info kernexec_retaddr_pass_info = {
102954 + .pass = &kernexec_retaddr_pass.pass,
102955 + .reference_pass_name = "pro_and_epilogue",
102956 + .ref_pass_instance_number = 1,
102957 + .pos_op = PASS_POS_INSERT_AFTER
102958 + };
102959 +
102960 + if (!plugin_default_version_check(version, &gcc_version)) {
102961 + error(G_("incompatible gcc/plugin versions"));
102962 + return 1;
102963 + }
102964 +
102965 + register_callback(plugin_name, PLUGIN_INFO, NULL, &kernexec_plugin_info);
102966 +
102967 + if (TARGET_64BIT == 0)
102968 + return 0;
102969 +
102970 + for (i = 0; i < argc; ++i) {
102971 + if (!strcmp(argv[i].key, "method")) {
102972 + if (!argv[i].value) {
102973 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
102974 + continue;
102975 + }
102976 + if (!strcmp(argv[i].value, "bts")) {
102977 + kernexec_instrument_fptr = kernexec_instrument_fptr_bts;
102978 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_bts;
102979 + } else if (!strcmp(argv[i].value, "or")) {
102980 + kernexec_instrument_fptr = kernexec_instrument_fptr_or;
102981 + kernexec_instrument_retaddr = kernexec_instrument_retaddr_or;
102982 + fix_register("r12", 1, 1);
102983 + } else
102984 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
102985 + continue;
102986 + }
102987 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
102988 + }
102989 + if (!kernexec_instrument_fptr || !kernexec_instrument_retaddr)
102990 + error(G_("no instrumentation method was selected via '-fplugin-arg-%s-method'"), plugin_name);
102991 +
102992 + if (kernexec_instrument_fptr == kernexec_instrument_fptr_or)
102993 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_reload_pass_info);
102994 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_fptr_pass_info);
102995 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &kernexec_retaddr_pass_info);
102996 +
102997 + return 0;
102998 +}
102999 diff --git a/tools/gcc/latent_entropy_plugin.c b/tools/gcc/latent_entropy_plugin.c
103000 new file mode 100644
103001 index 0000000..679b9ef
103002 --- /dev/null
103003 +++ b/tools/gcc/latent_entropy_plugin.c
103004 @@ -0,0 +1,335 @@
103005 +/*
103006 + * Copyright 2012-2013 by the PaX Team <pageexec@freemail.hu>
103007 + * Licensed under the GPL v2
103008 + *
103009 + * Note: the choice of the license means that the compilation process is
103010 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
103011 + * but for the kernel it doesn't matter since it doesn't link against
103012 + * any of the gcc libraries
103013 + *
103014 + * gcc plugin to help generate a little bit of entropy from program state,
103015 + * used during boot in the kernel
103016 + *
103017 + * TODO:
103018 + * - add ipa pass to identify not explicitly marked candidate functions
103019 + * - mix in more program state (function arguments/return values, loop variables, etc)
103020 + * - more instrumentation control via attribute parameters
103021 + *
103022 + * BUGS:
103023 + * - LTO needs -flto-partition=none for now
103024 + */
103025 +#include "gcc-plugin.h"
103026 +#include "config.h"
103027 +#include "system.h"
103028 +#include "coretypes.h"
103029 +#include "tree.h"
103030 +#include "tree-pass.h"
103031 +#include "flags.h"
103032 +#include "intl.h"
103033 +#include "toplev.h"
103034 +#include "plugin.h"
103035 +//#include "expr.h" where are you...
103036 +#include "diagnostic.h"
103037 +#include "plugin-version.h"
103038 +#include "tm.h"
103039 +#include "function.h"
103040 +#include "basic-block.h"
103041 +#include "gimple.h"
103042 +#include "rtl.h"
103043 +#include "emit-rtl.h"
103044 +#include "tree-flow.h"
103045 +#include "langhooks.h"
103046 +
103047 +#if BUILDING_GCC_VERSION >= 4008
103048 +#define TODO_dump_func 0
103049 +#endif
103050 +
103051 +int plugin_is_GPL_compatible;
103052 +
103053 +static tree latent_entropy_decl;
103054 +
103055 +static struct plugin_info latent_entropy_plugin_info = {
103056 + .version = "201308230230",
103057 + .help = NULL
103058 +};
103059 +
103060 +static unsigned int execute_latent_entropy(void);
103061 +static bool gate_latent_entropy(void);
103062 +
103063 +static struct gimple_opt_pass latent_entropy_pass = {
103064 + .pass = {
103065 + .type = GIMPLE_PASS,
103066 + .name = "latent_entropy",
103067 +#if BUILDING_GCC_VERSION >= 4008
103068 + .optinfo_flags = OPTGROUP_NONE,
103069 +#endif
103070 + .gate = gate_latent_entropy,
103071 + .execute = execute_latent_entropy,
103072 + .sub = NULL,
103073 + .next = NULL,
103074 + .static_pass_number = 0,
103075 + .tv_id = TV_NONE,
103076 + .properties_required = PROP_gimple_leh | PROP_cfg,
103077 + .properties_provided = 0,
103078 + .properties_destroyed = 0,
103079 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
103080 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
103081 + }
103082 +};
103083 +
103084 +static unsigned HOST_WIDE_INT seed;
103085 +static unsigned HOST_WIDE_INT get_random_const(void)
103086 +{
103087 + seed = (seed >> 1U) ^ (-(seed & 1ULL) & 0xD800000000000000ULL);
103088 + return seed;
103089 +}
103090 +
103091 +static tree handle_latent_entropy_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
103092 +{
103093 + switch (TREE_CODE(*node)) {
103094 + default:
103095 + *no_add_attrs = true;
103096 + error("%qE attribute only applies to functions and variables", name);
103097 + break;
103098 +
103099 + case VAR_DECL:
103100 + if (DECL_INITIAL(*node)) {
103101 + *no_add_attrs = true;
103102 + error("variable %qD with %qE attribute must not be initialized", *node, name);
103103 + break;
103104 + }
103105 + DECL_INITIAL(*node) = build_int_cstu(long_long_unsigned_type_node, get_random_const());
103106 + break;
103107 +
103108 + case FUNCTION_DECL:
103109 + break;
103110 + }
103111 +
103112 + return NULL_TREE;
103113 +}
103114 +
103115 +static struct attribute_spec latent_entropy_attr = {
103116 + .name = "latent_entropy",
103117 + .min_length = 0,
103118 + .max_length = 0,
103119 + .decl_required = true,
103120 + .type_required = false,
103121 + .function_type_required = false,
103122 + .handler = handle_latent_entropy_attribute,
103123 +#if BUILDING_GCC_VERSION >= 4007
103124 + .affects_type_identity = false
103125 +#endif
103126 +};
103127 +
103128 +static void register_attributes(void *event_data, void *data)
103129 +{
103130 + register_attribute(&latent_entropy_attr);
103131 +}
103132 +
103133 +static bool gate_latent_entropy(void)
103134 +{
103135 + tree latent_entropy_attr;
103136 +
103137 + latent_entropy_attr = lookup_attribute("latent_entropy", DECL_ATTRIBUTES(current_function_decl));
103138 + return latent_entropy_attr != NULL_TREE;
103139 +}
103140 +
103141 +static enum tree_code get_op(tree *rhs)
103142 +{
103143 + static enum tree_code op;
103144 + unsigned HOST_WIDE_INT random_const;
103145 +
103146 + random_const = get_random_const();
103147 +
103148 + switch (op) {
103149 + case BIT_XOR_EXPR:
103150 + op = PLUS_EXPR;
103151 + break;
103152 +
103153 + case PLUS_EXPR:
103154 + if (rhs) {
103155 + op = LROTATE_EXPR;
103156 + random_const &= HOST_BITS_PER_WIDE_INT - 1;
103157 + break;
103158 + }
103159 +
103160 + case LROTATE_EXPR:
103161 + default:
103162 + op = BIT_XOR_EXPR;
103163 + break;
103164 + }
103165 + if (rhs)
103166 + *rhs = build_int_cstu(unsigned_intDI_type_node, random_const);
103167 + return op;
103168 +}
103169 +
103170 +static void perturb_local_entropy(basic_block bb, tree local_entropy)
103171 +{
103172 + gimple_stmt_iterator gsi;
103173 + gimple assign;
103174 + tree addxorrol, rhs;
103175 + enum tree_code op;
103176 +
103177 + op = get_op(&rhs);
103178 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, op, unsigned_intDI_type_node, local_entropy, rhs);
103179 + assign = gimple_build_assign(local_entropy, addxorrol);
103180 + gsi = gsi_after_labels(bb);
103181 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
103182 + update_stmt(assign);
103183 +//debug_bb(bb);
103184 +}
103185 +
103186 +static void perturb_latent_entropy(basic_block bb, tree rhs)
103187 +{
103188 + gimple_stmt_iterator gsi;
103189 + gimple assign;
103190 + tree addxorrol, temp;
103191 +
103192 + // 1. create temporary copy of latent_entropy
103193 + temp = create_tmp_var(unsigned_intDI_type_node, "temp_latent_entropy");
103194 +#if BUILDING_GCC_VERSION <= 4007
103195 + add_referenced_var(temp);
103196 +#endif
103197 +
103198 + // 2. read...
103199 + temp = make_ssa_name(temp, NULL);
103200 + assign = gimple_build_assign(temp, latent_entropy_decl);
103201 + SSA_NAME_DEF_STMT(temp) = assign;
103202 +#if BUILDING_GCC_VERSION <= 4007
103203 + add_referenced_var(latent_entropy_decl);
103204 +#endif
103205 + gsi = gsi_after_labels(bb);
103206 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
103207 + update_stmt(assign);
103208 +
103209 + // 3. ...modify...
103210 + addxorrol = fold_build2_loc(UNKNOWN_LOCATION, get_op(NULL), unsigned_intDI_type_node, temp, rhs);
103211 + temp = make_ssa_name(SSA_NAME_VAR(temp), NULL);
103212 + assign = gimple_build_assign(temp, addxorrol);
103213 + SSA_NAME_DEF_STMT(temp) = assign;
103214 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
103215 + update_stmt(assign);
103216 +
103217 + // 4. ...write latent_entropy
103218 + assign = gimple_build_assign(latent_entropy_decl, temp);
103219 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
103220 + update_stmt(assign);
103221 +}
103222 +
103223 +static unsigned int execute_latent_entropy(void)
103224 +{
103225 + basic_block bb;
103226 + gimple assign;
103227 + gimple_stmt_iterator gsi;
103228 + tree local_entropy;
103229 +
103230 + if (!latent_entropy_decl) {
103231 + struct varpool_node *node;
103232 +
103233 +#if BUILDING_GCC_VERSION <= 4007
103234 + for (node = varpool_nodes; node; node = node->next) {
103235 + tree var = node->decl;
103236 +#else
103237 + FOR_EACH_VARIABLE(node) {
103238 + tree var = node->symbol.decl;
103239 +#endif
103240 + if (strcmp(IDENTIFIER_POINTER(DECL_NAME(var)), "latent_entropy"))
103241 + continue;
103242 + latent_entropy_decl = var;
103243 +// debug_tree(var);
103244 + break;
103245 + }
103246 + if (!latent_entropy_decl) {
103247 +// debug_tree(current_function_decl);
103248 + return 0;
103249 + }
103250 + }
103251 +
103252 +//fprintf(stderr, "latent_entropy: %s\n", IDENTIFIER_POINTER(DECL_NAME(current_function_decl)));
103253 +
103254 + // 1. create local entropy variable
103255 + local_entropy = create_tmp_var(unsigned_intDI_type_node, "local_entropy");
103256 +#if BUILDING_GCC_VERSION <= 4007
103257 + add_referenced_var(local_entropy);
103258 + mark_sym_for_renaming(local_entropy);
103259 +#endif
103260 +
103261 + // 2. initialize local entropy variable
103262 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
103263 + if (dom_info_available_p(CDI_DOMINATORS))
103264 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
103265 + gsi = gsi_start_bb(bb);
103266 +
103267 + assign = gimple_build_assign(local_entropy, build_int_cstu(unsigned_intDI_type_node, get_random_const()));
103268 +// gimple_set_location(assign, loc);
103269 + gsi_insert_after(&gsi, assign, GSI_NEW_STMT);
103270 + update_stmt(assign);
103271 +//debug_bb(bb);
103272 + bb = bb->next_bb;
103273 +
103274 + // 3. instrument each BB with an operation on the local entropy variable
103275 + while (bb != EXIT_BLOCK_PTR) {
103276 + perturb_local_entropy(bb, local_entropy);
103277 +//debug_bb(bb);
103278 + bb = bb->next_bb;
103279 + };
103280 +
103281 + // 4. mix local entropy into the global entropy variable
103282 + perturb_latent_entropy(EXIT_BLOCK_PTR->prev_bb, local_entropy);
103283 +//debug_bb(EXIT_BLOCK_PTR->prev_bb);
103284 + return 0;
103285 +}
103286 +
103287 +static void start_unit_callback(void *gcc_data, void *user_data)
103288 +{
103289 + tree latent_entropy_type;
103290 +
103291 +#if BUILDING_GCC_VERSION >= 4007
103292 + seed = get_random_seed(false);
103293 +#else
103294 + sscanf(get_random_seed(false), "%" HOST_WIDE_INT_PRINT "x", &seed);
103295 + seed *= seed;
103296 +#endif
103297 +
103298 + if (in_lto_p)
103299 + return;
103300 +
103301 + // extern volatile u64 latent_entropy
103302 + gcc_assert(TYPE_PRECISION(long_long_unsigned_type_node) == 64);
103303 + latent_entropy_type = build_qualified_type(long_long_unsigned_type_node, TYPE_QUALS(long_long_unsigned_type_node) | TYPE_QUAL_VOLATILE);
103304 + latent_entropy_decl = build_decl(UNKNOWN_LOCATION, VAR_DECL, get_identifier("latent_entropy"), latent_entropy_type);
103305 +
103306 + TREE_STATIC(latent_entropy_decl) = 1;
103307 + TREE_PUBLIC(latent_entropy_decl) = 1;
103308 + TREE_USED(latent_entropy_decl) = 1;
103309 + TREE_THIS_VOLATILE(latent_entropy_decl) = 1;
103310 + DECL_EXTERNAL(latent_entropy_decl) = 1;
103311 + DECL_ARTIFICIAL(latent_entropy_decl) = 1;
103312 + lang_hooks.decls.pushdecl(latent_entropy_decl);
103313 +// DECL_ASSEMBLER_NAME(latent_entropy_decl);
103314 +// varpool_finalize_decl(latent_entropy_decl);
103315 +// varpool_mark_needed_node(latent_entropy_decl);
103316 +}
103317 +
103318 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
103319 +{
103320 + const char * const plugin_name = plugin_info->base_name;
103321 + struct register_pass_info latent_entropy_pass_info = {
103322 + .pass = &latent_entropy_pass.pass,
103323 + .reference_pass_name = "optimized",
103324 + .ref_pass_instance_number = 1,
103325 + .pos_op = PASS_POS_INSERT_BEFORE
103326 + };
103327 +
103328 + if (!plugin_default_version_check(version, &gcc_version)) {
103329 + error(G_("incompatible gcc/plugin versions"));
103330 + return 1;
103331 + }
103332 +
103333 + register_callback(plugin_name, PLUGIN_INFO, NULL, &latent_entropy_plugin_info);
103334 + register_callback ("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
103335 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &latent_entropy_pass_info);
103336 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
103337 +
103338 + return 0;
103339 +}
103340 diff --git a/tools/gcc/size_overflow_hash.data b/tools/gcc/size_overflow_hash.data
103341 new file mode 100644
103342 index 0000000..a0c9844
103343 --- /dev/null
103344 +++ b/tools/gcc/size_overflow_hash.data
103345 @@ -0,0 +1,7723 @@
103346 +intel_fake_agp_alloc_by_type_1 intel_fake_agp_alloc_by_type 1 1 NULL
103347 +ocfs2_get_refcount_tree_3 ocfs2_get_refcount_tree 0 3 NULL
103348 +batadv_orig_node_del_if_4 batadv_orig_node_del_if 2 4 NULL
103349 +storvsc_connect_to_vsp_22 storvsc_connect_to_vsp 2 22 NULL
103350 +compat_sock_setsockopt_23 compat_sock_setsockopt 5 23 NULL
103351 +carl9170_alloc_27 carl9170_alloc 1 27 NULL
103352 +sel_read_policyvers_55 sel_read_policyvers 3 55 NULL nohasharray
103353 +padzero_55 padzero 1 55 &sel_read_policyvers_55
103354 +cfg80211_disconnected_57 cfg80211_disconnected 4 57 NULL
103355 +__skb_to_sgvec_72 __skb_to_sgvec 0 72 NULL
103356 +crypto_authenc_setkey_80 crypto_authenc_setkey 3 80 NULL
103357 +snd_korg1212_copy_to_92 snd_korg1212_copy_to 6 92 NULL
103358 +load_msg_95 load_msg 2 95 NULL
103359 +device_flush_iotlb_115 device_flush_iotlb 2-3 115 NULL
103360 +ipath_verbs_send_117 ipath_verbs_send 5-3 117 NULL nohasharray
103361 +write_all_supers_117 write_all_supers 0 117 &ipath_verbs_send_117
103362 +init_q_132 init_q 4 132 NULL
103363 +ocfs2_local_alloc_slide_window_134 ocfs2_local_alloc_slide_window 0 134 NULL
103364 +memstick_alloc_host_142 memstick_alloc_host 1 142 NULL
103365 +hva_to_gfn_memslot_149 hva_to_gfn_memslot 0-1 149 NULL
103366 +ping_v6_sendmsg_152 ping_v6_sendmsg 4 152 NULL
103367 +ext4_ext_get_actual_len_153 ext4_ext_get_actual_len 0 153 NULL nohasharray
103368 +tracing_trace_options_write_153 tracing_trace_options_write 3 153 &ext4_ext_get_actual_len_153
103369 +nvme_create_queue_170 nvme_create_queue 3 170 NULL
103370 +xfs_buf_item_get_format_189 xfs_buf_item_get_format 2 189 NULL
103371 +iscsi_session_setup_196 iscsi_session_setup 4-5 196 NULL
103372 +device_add_bin_attributes_205 device_add_bin_attributes 0 205 NULL
103373 +br_port_info_size_268 br_port_info_size 0 268 NULL
103374 +generic_file_direct_write_291 generic_file_direct_write 0 291 NULL
103375 +read_file_war_stats_292 read_file_war_stats 3 292 NULL
103376 +SYSC_connect_304 SYSC_connect 3 304 NULL
103377 +syslog_print_307 syslog_print 2 307 NULL
103378 +dn_setsockopt_314 dn_setsockopt 5 314 NULL
103379 +next_node_allowed_318 next_node_allowed 1-0 318 NULL
103380 +mlx5_core_access_reg_361 mlx5_core_access_reg 5-3 361 NULL
103381 +xfs_get_blocks_364 xfs_get_blocks 2 364 NULL
103382 +get_more_blocks_367 get_more_blocks 0 367 NULL
103383 +sysfs_create_dir_398 sysfs_create_dir 0 398 NULL
103384 +hw_device_state_409 hw_device_state 0 409 NULL
103385 +aio_read_events_ring_410 aio_read_events_ring 3 410 NULL
103386 +lbs_rdmac_read_418 lbs_rdmac_read 3 418 NULL
103387 +snd_ca0106_ptr_read_467 snd_ca0106_ptr_read 0 467 NULL
103388 +sparse_index_init_482 sparse_index_init 2 482 NULL
103389 +cfs_trace_set_debug_mb_usrstr_486 cfs_trace_set_debug_mb_usrstr 2 486 NULL
103390 +nvme_trans_modesel_data_488 nvme_trans_modesel_data 4 488 NULL
103391 +iwl_dbgfs_protection_mode_write_502 iwl_dbgfs_protection_mode_write 3 502 NULL
103392 +rx_rx_defrag_end_read_505 rx_rx_defrag_end_read 3 505 NULL
103393 +_snd_pcm_hw_param_first_516 _snd_pcm_hw_param_first 0 516 NULL
103394 +ocfs2_validate_meta_ecc_bhs_527 ocfs2_validate_meta_ecc_bhs 0 527 NULL
103395 +zlib_deflate_workspacesize_537 zlib_deflate_workspacesize 0-1-2 537 NULL
103396 +iwl_dbgfs_wowlan_sram_read_540 iwl_dbgfs_wowlan_sram_read 3 540 NULL
103397 +sco_sock_setsockopt_552 sco_sock_setsockopt 5 552 NULL
103398 +lpfc_nlp_state_name_556 lpfc_nlp_state_name 2 556 NULL
103399 +snd_aw2_saa7146_get_hw_ptr_playback_558 snd_aw2_saa7146_get_hw_ptr_playback 0 558 NULL
103400 +dev_hard_header_565 dev_hard_header 0 565 NULL nohasharray
103401 +start_isoc_chain_565 start_isoc_chain 2 565 &dev_hard_header_565
103402 +ocfs2_refcounted_xattr_delete_need_584 ocfs2_refcounted_xattr_delete_need 0 584 NULL
103403 +smk_write_load_self2_591 smk_write_load_self2 3 591 NULL
103404 +cl_page_own0_599 cl_page_own0 0 599 NULL
103405 +btrfs_stack_file_extent_offset_607 btrfs_stack_file_extent_offset 0 607 NULL
103406 +ni_gpct_device_construct_610 ni_gpct_device_construct 5 610 NULL
103407 +fuse_request_alloc_nofs_617 fuse_request_alloc_nofs 1 617 NULL
103408 +ptlrpc_lprocfs_nrs_seq_write_621 ptlrpc_lprocfs_nrs_seq_write 3 621 NULL
103409 +viafb_dfpl_proc_write_627 viafb_dfpl_proc_write 3 627 NULL
103410 +ocfs2_num_free_extents_632 ocfs2_num_free_extents 0 632 NULL
103411 +clone_split_bio_633 clone_split_bio 6 633 NULL
103412 +ceph_osdc_new_request_635 ceph_osdc_new_request 6 635 NULL
103413 +remap_to_cache_640 remap_to_cache 3 640 NULL
103414 +cfs_hash_bkt_size_643 cfs_hash_bkt_size 0 643 NULL nohasharray
103415 +drbd_bm_find_next_643 drbd_bm_find_next 2 643 &cfs_hash_bkt_size_643
103416 +unlink_queued_645 unlink_queued 4 645 NULL
103417 +dtim_interval_read_654 dtim_interval_read 3 654 NULL
103418 +div_u64_rem_672 div_u64_rem 0 672 NULL
103419 +mem_rx_free_mem_blks_read_675 mem_rx_free_mem_blks_read 3 675 NULL
103420 +persistent_ram_vmap_709 persistent_ram_vmap 1-2 709 NULL
103421 +ipath_resize_cq_712 ipath_resize_cq 2 712 NULL
103422 +disk_max_parts_719 disk_max_parts 0 719 NULL
103423 +sctp_setsockopt_peer_addr_params_734 sctp_setsockopt_peer_addr_params 3 734 NULL
103424 +dvb_video_write_754 dvb_video_write 3 754 NULL
103425 +cfs_trace_allocate_string_buffer_781 cfs_trace_allocate_string_buffer 2 781 NULL
103426 +jbd2_journal_dirty_metadata_784 jbd2_journal_dirty_metadata 0 784 NULL
103427 +ath6kl_disconnect_timeout_write_794 ath6kl_disconnect_timeout_write 3 794 NULL
103428 +__vmalloc_node_range_811 __vmalloc_node_range 7 811 NULL nohasharray
103429 +snd_pcm_drain_811 snd_pcm_drain 0 811 &__vmalloc_node_range_811
103430 +if_writecmd_815 if_writecmd 2 815 NULL
103431 +aac_change_queue_depth_825 aac_change_queue_depth 2 825 NULL
103432 +SyS_write_846 SyS_write 3 846 NULL
103433 +um_idi_read_850 um_idi_read 3 850 NULL
103434 +error_state_read_859 error_state_read 6 859 NULL
103435 +o2net_send_message_vec_879 o2net_send_message_vec 4 879 NULL nohasharray
103436 +iwl_dbgfs_fh_reg_read_879 iwl_dbgfs_fh_reg_read 3 879 &o2net_send_message_vec_879
103437 +intel_alloc_iova_883 intel_alloc_iova 3 883 NULL
103438 +snd_pcm_action_single_905 snd_pcm_action_single 0 905 NULL
103439 +readw_931 readw 0 931 NULL
103440 +carl9170_cmd_buf_950 carl9170_cmd_buf 3 950 NULL
103441 +__nodes_weight_956 __nodes_weight 2-0 956 NULL
103442 +bnx2x_fill_fw_str_968 bnx2x_fill_fw_str 3 968 NULL
103443 +memcmp_990 memcmp 0 990 NULL
103444 +pte_prefetch_gfn_to_pfn_997 pte_prefetch_gfn_to_pfn 2 997 NULL
103445 +free_ind_block_999 free_ind_block 0 999 NULL
103446 +dm_cache_set_dirty_1016 dm_cache_set_dirty 2 1016 NULL
103447 +readreg_1017 readreg 0-1 1017 NULL
103448 +_do_truncate_1019 _do_truncate 2 1019 NULL
103449 +smk_write_cipso2_1021 smk_write_cipso2 3 1021 NULL
103450 +gigaset_initdriver_1060 gigaset_initdriver 2 1060 NULL
103451 +mce_request_packet_1073 mce_request_packet 3 1073 NULL
103452 +agp_create_memory_1075 agp_create_memory 1 1075 NULL
103453 +ext4_orphan_add_1080 ext4_orphan_add 0 1080 NULL
103454 +_scsih_adjust_queue_depth_1083 _scsih_adjust_queue_depth 2 1083 NULL
103455 +llcp_sock_sendmsg_1092 llcp_sock_sendmsg 4 1092 NULL
103456 +inode_ref_info_1094 inode_ref_info 0 1094 NULL nohasharray
103457 +llc_mac_hdr_init_1094 llc_mac_hdr_init 0 1094 &inode_ref_info_1094
103458 +nfs4_init_nonuniform_client_string_1097 nfs4_init_nonuniform_client_string 3 1097 NULL
103459 +utf8s_to_utf16s_1115 utf8s_to_utf16s 0 1115 NULL
103460 +__btrfs_cow_block_1125 __btrfs_cow_block 7-0 1125 NULL nohasharray
103461 +__ext4_journal_stop_1125 __ext4_journal_stop 0 1125 &__btrfs_cow_block_1125
103462 +cfg80211_report_obss_beacon_1133 cfg80211_report_obss_beacon 3 1133 NULL
103463 +vmalloc_32_1135 vmalloc_32 1 1135 NULL
103464 +i2400m_rx_ctl_1157 i2400m_rx_ctl 4 1157 NULL
103465 +find_free_extent_1173 find_free_extent 4-0 1173 NULL
103466 +ipc_alloc_1192 ipc_alloc 1 1192 NULL
103467 +ib_create_send_mad_1196 ib_create_send_mad 5 1196 NULL
103468 +pstore_ftrace_knob_write_1198 pstore_ftrace_knob_write 3 1198 NULL
103469 +i2400m_rx_ctl_ack_1199 i2400m_rx_ctl_ack 3 1199 NULL
103470 +dgrp_dpa_read_1204 dgrp_dpa_read 3 1204 NULL
103471 +i2cdev_read_1206 i2cdev_read 3 1206 NULL
103472 +lov_ost_pool_init_1215 lov_ost_pool_init 2 1215 NULL
103473 +acpi_battery_write_alarm_1240 acpi_battery_write_alarm 3 1240 NULL
103474 +nested_get_page_1252 nested_get_page 2 1252 NULL
103475 +ocfs2_extend_file_1266 ocfs2_extend_file 3 1266 NULL
103476 +qla4xxx_change_queue_depth_1268 qla4xxx_change_queue_depth 2 1268 NULL
103477 +ioctl_private_iw_point_1273 ioctl_private_iw_point 7 1273 NULL
103478 +batadv_tt_prepare_packet_buff_1280 batadv_tt_prepare_packet_buff 4 1280 NULL
103479 +SyS_flistxattr_1287 SyS_flistxattr 3 1287 NULL
103480 +tx_frag_in_process_called_read_1290 tx_frag_in_process_called_read 3 1290 NULL
103481 +ocfs2_append_rec_to_path_1321 ocfs2_append_rec_to_path 0 1321 NULL
103482 +ffs_1322 ffs 0 1322 NULL
103483 +qlcnic_pci_sriov_configure_1327 qlcnic_pci_sriov_configure 2 1327 NULL nohasharray
103484 +push_node_left_1327 push_node_left 0 1327 &qlcnic_pci_sriov_configure_1327
103485 +btrfs_submit_compressed_write_1347 btrfs_submit_compressed_write 5 1347 NULL
103486 +gen_pool_best_fit_1348 gen_pool_best_fit 2-3-4 1348 NULL
103487 +btrfs_alloc_logged_file_extent_1354 btrfs_alloc_logged_file_extent 0 1354 NULL
103488 +snd_pcm_lib_write1_1358 snd_pcm_lib_write1 0-3 1358 NULL
103489 +ipx_sendmsg_1362 ipx_sendmsg 4 1362 NULL
103490 +iov_num_pages_1364 iov_num_pages 0 1364 NULL
103491 +fw_stats_raw_read_1369 fw_stats_raw_read 3 1369 NULL
103492 +ocfs2_prepare_inode_for_write_1372 ocfs2_prepare_inode_for_write 3 1372 NULL
103493 +sctp_setsockopt_initmsg_1383 sctp_setsockopt_initmsg 3 1383 NULL
103494 +do_msgsnd_1387 do_msgsnd 4 1387 NULL
103495 +SYSC_io_getevents_1392 SYSC_io_getevents 3 1392 NULL
103496 +zone_page_state_1393 zone_page_state 0 1393 NULL
103497 +file_read_actor_1401 file_read_actor 4 1401 NULL
103498 +vb2_vmalloc_alloc_1402 vb2_vmalloc_alloc 2 1402 NULL
103499 +cfs_trace_copyout_string_1416 cfs_trace_copyout_string 2 1416 NULL
103500 +pq_init_1423 pq_init 1 1423 NULL
103501 +init_rs_internal_1436 init_rs_internal 1 1436 NULL
103502 +vb2_dc_get_user_pages_1442 vb2_dc_get_user_pages 1-3 1442 NULL
103503 +stack_max_size_read_1445 stack_max_size_read 3 1445 NULL
103504 +tx_queue_len_read_1463 tx_queue_len_read 3 1463 NULL
103505 +xprt_alloc_1475 xprt_alloc 2 1475 NULL
103506 +SYSC_syslog_1477 SYSC_syslog 3 1477 NULL
103507 +sta_num_ps_buf_frames_read_1488 sta_num_ps_buf_frames_read 3 1488 NULL
103508 +posix_acl_permission_1495 posix_acl_permission 0 1495 NULL
103509 +fpregs_set_1497 fpregs_set 4 1497 NULL
103510 +ocfs2_alloc_dinode_update_counts_1507 ocfs2_alloc_dinode_update_counts 0 1507 NULL
103511 +tomoyo_round2_1518 tomoyo_round2 0 1518 NULL
103512 +alloc_perm_bits_1532 alloc_perm_bits 2 1532 NULL
103513 +ath6kl_init_get_fwcaps_1557 ath6kl_init_get_fwcaps 3 1557 NULL
103514 +extent_from_logical_1585 extent_from_logical 0 1585 NULL
103515 +ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime_1589 ieee80211_if_read_dot11MeshHWMPnetDiameterTraversalTime 3 1589 NULL
103516 +ipath_ht_handle_hwerrors_1592 ipath_ht_handle_hwerrors 3 1592 NULL
103517 +packet_buffer_init_1607 packet_buffer_init 2 1607 NULL
103518 +btmrvl_hscmd_read_1614 btmrvl_hscmd_read 3 1614 NULL
103519 +v9fs_fid_xattr_get_1618 v9fs_fid_xattr_get 0 1618 NULL
103520 +run_delayed_extent_op_1637 run_delayed_extent_op 0 1637 NULL
103521 +ikconfig_read_current_1658 ikconfig_read_current 3 1658 NULL
103522 +qgroup_rescan_init_1664 qgroup_rescan_init 0 1664 NULL
103523 +mei_cl_recv_1665 mei_cl_recv 3 1665 NULL
103524 +rmap_add_1677 rmap_add 3 1677 NULL
103525 +configfs_read_file_1683 configfs_read_file 3 1683 NULL
103526 +pdu_write_u_1710 pdu_write_u 3 1710 NULL
103527 +coda_psdev_write_1711 coda_psdev_write 3 1711 NULL nohasharray
103528 +gfn_to_hva_prot_1711 gfn_to_hva_prot 2 1711 &coda_psdev_write_1711
103529 +btrfs_dir_data_len_1714 btrfs_dir_data_len 0 1714 NULL
103530 +dma_memcpy_pg_to_iovec_1725 dma_memcpy_pg_to_iovec 6 1725 NULL
103531 +internal_create_group_1733 internal_create_group 0 1733 NULL
103532 +dev_irnet_read_1741 dev_irnet_read 3 1741 NULL
103533 +tx_frag_called_read_1748 tx_frag_called_read 3 1748 NULL
103534 +ebt_size_mwt_1768 ebt_size_mwt 0 1768 NULL
103535 +cosa_write_1774 cosa_write 3 1774 NULL
103536 +fcoe_ctlr_device_add_1793 fcoe_ctlr_device_add 3 1793 NULL
103537 +__nodelist_scnprintf_1815 __nodelist_scnprintf 4-2-0 1815 NULL
103538 +sb_issue_zeroout_1884 sb_issue_zeroout 3-0 1884 NULL
103539 +rx_defrag_called_read_1897 rx_defrag_called_read 3 1897 NULL
103540 +nfs_parse_server_name_1899 nfs_parse_server_name 2 1899 NULL
103541 +SyS_add_key_1900 SyS_add_key 4 1900 NULL
103542 +uhid_char_read_1920 uhid_char_read 3 1920 NULL
103543 +tx_tx_retry_data_read_1926 tx_tx_retry_data_read 3 1926 NULL
103544 +bdev_erase_1933 bdev_erase 3 1933 NULL
103545 +ext3_fiemap_1936 ext3_fiemap 4 1936 NULL
103546 +cyttsp_probe_1940 cyttsp_probe 4 1940 NULL
103547 +ieee80211_if_fmt_dot11MeshConfirmTimeout_1945 ieee80211_if_fmt_dot11MeshConfirmTimeout 3 1945 NULL
103548 +read_swap_header_1957 read_swap_header 0 1957 NULL
103549 +ivtv_v4l2_read_1964 ivtv_v4l2_read 3 1964 NULL
103550 +sel_read_avc_hash_stats_1984 sel_read_avc_hash_stats 3 1984 NULL
103551 +gpio_power_write_1991 gpio_power_write 3 1991 NULL
103552 +__alloc_bootmem_node_1992 __alloc_bootmem_node 2 1992 NULL
103553 +atomic_read_unchecked_1995 atomic_read_unchecked 0 1995 NULL
103554 +sb_min_blocksize_2004 sb_min_blocksize 0-2 2004 NULL
103555 +batadv_tt_commit_changes_2008 batadv_tt_commit_changes 4 2008 NULL
103556 +sep_prepare_input_dma_table_2009 sep_prepare_input_dma_table 2-3 2009 NULL
103557 +rx_rx_defrag_read_2010 rx_rx_defrag_read 3 2010 NULL
103558 +ksm_madvise_2012 ksm_madvise 0 2012 NULL
103559 +ocfs2_global_qinit_alloc_2018 ocfs2_global_qinit_alloc 0 2018 NULL
103560 +write_flush_pipefs_2021 write_flush_pipefs 3 2021 NULL
103561 +BcmCopySection_2035 BcmCopySection 5-0 2035 NULL
103562 +ath6kl_fwlog_mask_read_2050 ath6kl_fwlog_mask_read 3 2050 NULL
103563 +ocfs2_expand_inline_dir_2063 ocfs2_expand_inline_dir 3 2063 NULL
103564 +__generic_copy_from_user_intel_2073 __generic_copy_from_user_intel 0-3 2073 NULL
103565 +diva_set_driver_dbg_mask_2077 diva_set_driver_dbg_mask 0 2077 NULL nohasharray
103566 +alloc_retstack_tasklist_2077 alloc_retstack_tasklist 0 2077 &diva_set_driver_dbg_mask_2077
103567 +iwl_dbgfs_current_sleep_command_read_2081 iwl_dbgfs_current_sleep_command_read 3 2081 NULL
103568 +get_unaligned_le32_2092 get_unaligned_le32 0 2092 NULL
103569 +idetape_chrdev_read_2097 idetape_chrdev_read 3 2097 NULL
103570 +audit_expand_2098 audit_expand 0 2098 NULL
103571 +num_pages_spanned_2105 num_pages_spanned 0 2105 NULL
103572 +shrd128_2106 shrd128 0-1-3-2 2106 NULL
103573 +iwl_dbgfs_log_event_read_2107 iwl_dbgfs_log_event_read 3 2107 NULL
103574 +ecryptfs_encrypt_and_encode_filename_2109 ecryptfs_encrypt_and_encode_filename 6 2109 NULL
103575 +enable_read_2117 enable_read 3 2117 NULL
103576 +pcf50633_write_block_2124 pcf50633_write_block 2-3 2124 NULL
103577 +snd_interval_refine_last_2127 snd_interval_refine_last 0 2127 NULL
103578 +check_load_and_stores_2143 check_load_and_stores 2 2143 NULL
103579 +lp_gpio_irq_map_2149 lp_gpio_irq_map 2 2149 NULL
103580 +iov_iter_count_2152 iov_iter_count 0 2152 NULL
103581 +__copy_to_user_ll_2157 __copy_to_user_ll 0-3 2157 NULL
103582 +ocfs2_et_sanity_check_2164 ocfs2_et_sanity_check 0 2164 NULL
103583 +_ore_get_io_state_2166 _ore_get_io_state 5-3-4 2166 NULL
103584 +bio_integrity_alloc_2194 bio_integrity_alloc 3 2194 NULL
103585 +picolcd_debug_reset_write_2195 picolcd_debug_reset_write 3 2195 NULL
103586 +mei_dbgfs_read_meclients_2219 mei_dbgfs_read_meclients 3 2219 NULL nohasharray
103587 +u32_array_read_2219 u32_array_read 3 2219 &mei_dbgfs_read_meclients_2219
103588 +vhci_write_2224 vhci_write 3 2224 NULL
103589 +__ocfs2_journal_access_2241 __ocfs2_journal_access 0 2241 NULL
103590 +ieee80211_if_read_dot11MeshHWMPRannInterval_2249 ieee80211_if_read_dot11MeshHWMPRannInterval 3 2249 NULL
103591 +netlbl_secattr_catmap_walk_2255 netlbl_secattr_catmap_walk 0-2 2255 NULL
103592 +sel_write_avc_cache_threshold_2256 sel_write_avc_cache_threshold 3 2256 NULL
103593 +do_update_counters_2259 do_update_counters 4 2259 NULL
103594 +ath6kl_wmi_bssinfo_event_rx_2275 ath6kl_wmi_bssinfo_event_rx 3 2275 NULL
103595 +debug_debug5_read_2291 debug_debug5_read 3 2291 NULL
103596 +ocfs2_shift_tree_depth_2292 ocfs2_shift_tree_depth 0 2292 NULL
103597 +kvm_clear_guest_page_2308 kvm_clear_guest_page 2-4 2308 NULL
103598 +intel_sdvo_set_value_2311 intel_sdvo_set_value 4 2311 NULL
103599 +picolcd_fb_write_2318 picolcd_fb_write 3 2318 NULL nohasharray
103600 +hfsplus_find_init_2318 hfsplus_find_init 0 2318 &picolcd_fb_write_2318
103601 +gart_map_page_2325 gart_map_page 4-3 2325 NULL
103602 +__erst_read_to_erange_2341 __erst_read_to_erange 0 2341 NULL
103603 +zr364xx_read_2354 zr364xx_read 3 2354 NULL
103604 +viafb_iga2_odev_proc_write_2363 viafb_iga2_odev_proc_write 3 2363 NULL
103605 +SyS_mremap_2367 SyS_mremap 2-3-1-5 2367 NULL
103606 +xfs_buf_map_from_irec_2368 xfs_buf_map_from_irec 5 2368 NULL nohasharray
103607 +rose_recvmsg_2368 rose_recvmsg 4 2368 &xfs_buf_map_from_irec_2368
103608 +il_dbgfs_sensitivity_read_2370 il_dbgfs_sensitivity_read 3 2370 NULL
103609 +rxpipe_rx_prep_beacon_drop_read_2403 rxpipe_rx_prep_beacon_drop_read 3 2403 NULL
103610 +SYSC_mlock_2415 SYSC_mlock 1-2 2415 NULL
103611 +isdn_v110_open_2418 isdn_v110_open 3 2418 NULL
103612 +raid1_size_2419 raid1_size 0-2 2419 NULL
103613 +timespec_to_jiffies_timeout_2428 timespec_to_jiffies_timeout 0 2428 NULL
103614 +tty_buffer_find_2443 tty_buffer_find 2 2443 NULL
103615 +b43legacy_debugfs_read_2473 b43legacy_debugfs_read 3 2473 NULL
103616 +wiphy_new_2482 wiphy_new 2 2482 NULL
103617 +bio_alloc_bioset_2484 bio_alloc_bioset 2 2484 NULL
103618 +hfsplus_user_setxattr_2485 hfsplus_user_setxattr 4 2485 NULL
103619 +lookup_cache_entry_2494 lookup_cache_entry 2 2494 NULL
103620 +squashfs_read_fragment_index_table_2506 squashfs_read_fragment_index_table 4 2506 NULL
103621 +v9fs_cached_file_read_2514 v9fs_cached_file_read 3 2514 NULL
103622 +ext4_get_inode_loc_2516 ext4_get_inode_loc 0 2516 NULL
103623 +gspca_dev_probe_2570 gspca_dev_probe 4 2570 NULL
103624 +pcm_sanity_check_2574 pcm_sanity_check 0 2574 NULL
103625 +mdc_max_rpcs_in_flight_seq_write_2594 mdc_max_rpcs_in_flight_seq_write 3 2594 NULL
103626 +slot_bytes_2609 slot_bytes 0 2609 NULL
103627 +smk_write_logging_2618 smk_write_logging 3 2618 NULL
103628 +switch_status_2629 switch_status 5 2629 NULL
103629 +tlv_put_string_2631 tlv_put_string 0 2631 NULL
103630 +kvm_gfn_to_hva_cache_init_2636 kvm_gfn_to_hva_cache_init 3 2636 NULL
103631 +tcp_xmit_size_goal_2661 tcp_xmit_size_goal 2 2661 NULL
103632 +osc_build_ppga_2670 osc_build_ppga 2 2670 NULL
103633 +ffs_ep0_read_2672 ffs_ep0_read 3 2672 NULL
103634 +ocfs2_rotate_subtree_right_2674 ocfs2_rotate_subtree_right 0 2674 NULL
103635 +oti6858_write_2692 oti6858_write 4 2692 NULL
103636 +nfc_llcp_send_ui_frame_2702 nfc_llcp_send_ui_frame 5 2702 NULL
103637 +memcpy_fromiovecend_2707 memcpy_fromiovecend 4-3 2707 NULL
103638 +SyS_pwrite64_2708 SyS_pwrite64 3 2708 NULL nohasharray
103639 +lprocfs_stats_counter_size_2708 lprocfs_stats_counter_size 0 2708 &SyS_pwrite64_2708
103640 +__xip_file_write_2733 __xip_file_write 3-4 2733 NULL
103641 +can_nocow_extent_2744 can_nocow_extent 3 2744 NULL
103642 +xfs_readdir_2767 xfs_readdir 3 2767 NULL
103643 +mon_bin_ioctl_2771 mon_bin_ioctl 3 2771 NULL nohasharray
103644 +bictcp_update_2771 bictcp_update 2 2771 &mon_bin_ioctl_2771
103645 +__next_cpu_2782 __next_cpu 1-0 2782 NULL
103646 +set_msr_hyperv_pw_2785 set_msr_hyperv_pw 3 2785 NULL
103647 +device_add_attrs_2789 device_add_attrs 0 2789 NULL
103648 +iwl_dbgfs_clear_ucode_statistics_write_2804 iwl_dbgfs_clear_ucode_statistics_write 3 2804 NULL
103649 +sel_read_enforce_2828 sel_read_enforce 3 2828 NULL
103650 +vb2_dc_get_userptr_2829 vb2_dc_get_userptr 2-3 2829 NULL nohasharray
103651 +snd_pcm_reset_2829 snd_pcm_reset 0 2829 &vb2_dc_get_userptr_2829
103652 +sock_create_2838 sock_create 0 2838 NULL
103653 +wait_for_avail_2847 wait_for_avail 0 2847 NULL
103654 +ufs_free_fragments_2857 ufs_free_fragments 2 2857 NULL
103655 +sfq_alloc_2861 sfq_alloc 1 2861 NULL
103656 +irnet_ctrl_read_2863 irnet_ctrl_read 4 2863 NULL
103657 +move_addr_to_user_2868 move_addr_to_user 2 2868 NULL
103658 +mq_map_2871 mq_map 2 2871 NULL
103659 +__swab64p_2875 __swab64p 0 2875 NULL
103660 +nla_padlen_2883 nla_padlen 1 2883 NULL
103661 +cmm_write_2896 cmm_write 3 2896 NULL
103662 +alloc_page_cgroup_2919 alloc_page_cgroup 1-2 2919 NULL
103663 +osc_import_seq_write_2923 osc_import_seq_write 3 2923 NULL
103664 +xfs_trans_get_buf_map_2927 xfs_trans_get_buf_map 4 2927 NULL
103665 +nes_read_indexed_2946 nes_read_indexed 0 2946 NULL
103666 +tm6000_i2c_recv_regs16_2949 tm6000_i2c_recv_regs16 5 2949 NULL
103667 +i40e_dbg_prep_dump_buf_2951 i40e_dbg_prep_dump_buf 2 2951 NULL
103668 +set_fast_connectable_2952 set_fast_connectable 4 2952 NULL
103669 +free_area_init_core_2962 free_area_init_core 2-3 2962 NULL
103670 +do_strnlen_user_2976 do_strnlen_user 0-2 2976 NULL
103671 +ocfs2_find_branch_target_2989 ocfs2_find_branch_target 0 2989 NULL
103672 +p9_nr_pages_2992 p9_nr_pages 0-2 2992 NULL
103673 +lov_stripetype_seq_write_3013 lov_stripetype_seq_write 3 3013 NULL
103674 +btrfs_add_block_group_cache_3016 btrfs_add_block_group_cache 0 3016 NULL
103675 +do_dmabuf_dirty_sou_3017 do_dmabuf_dirty_sou 7 3017 NULL
103676 +depth_write_3021 depth_write 3 3021 NULL
103677 +snd_azf3328_codec_inl_3022 snd_azf3328_codec_inl 0 3022 NULL
103678 +kvm_unmap_hva_3028 kvm_unmap_hva 2 3028 NULL
103679 +xfrm_dst_alloc_copy_3034 xfrm_dst_alloc_copy 3 3034 NULL
103680 +lpfc_idiag_mbxacc_write_3038 lpfc_idiag_mbxacc_write 3 3038 NULL nohasharray
103681 +iwl_dbgfs_sleep_level_override_read_3038 iwl_dbgfs_sleep_level_override_read 3 3038 &lpfc_idiag_mbxacc_write_3038
103682 +nr_free_buffer_pages_3044 nr_free_buffer_pages 0 3044 NULL
103683 +il3945_ucode_rx_stats_read_3048 il3945_ucode_rx_stats_read 3 3048 NULL
103684 +qp_alloc_ppn_set_3068 qp_alloc_ppn_set 2-4 3068 NULL
103685 +__blk_end_bidi_request_3070 __blk_end_bidi_request 4-3 3070 NULL
103686 +dac960_user_command_proc_write_3071 dac960_user_command_proc_write 3 3071 NULL
103687 +read_file_antenna_diversity_3077 read_file_antenna_diversity 3 3077 NULL
103688 +free_coherent_3082 free_coherent 4-2 3082 NULL
103689 +ocfs2_get_right_path_3097 ocfs2_get_right_path 0 3097 NULL
103690 +clone_bio_3100 clone_bio 6 3100 NULL nohasharray
103691 +ttusb2_msg_3100 ttusb2_msg 4 3100 &clone_bio_3100
103692 +rb_alloc_3102 rb_alloc 1 3102 NULL
103693 +simple_write_to_buffer_3122 simple_write_to_buffer 2-5 3122 NULL
103694 +print_time_3132 print_time 0 3132 NULL
103695 +fill_write_buffer_3142 fill_write_buffer 3 3142 NULL
103696 +find_free_extent_3153 find_free_extent 5 3153 NULL
103697 +CIFSSMBSetPosixACL_3154 CIFSSMBSetPosixACL 5 3154 NULL
103698 +compat_sys_migrate_pages_3157 compat_sys_migrate_pages 2 3157 NULL
103699 +i915_gem_obj_ggtt_offset_3162 i915_gem_obj_ggtt_offset 0 3162 NULL
103700 +uv_num_possible_blades_3177 uv_num_possible_blades 0 3177 NULL
103701 +uvc_video_stats_dump_3181 uvc_video_stats_dump 3 3181 NULL
103702 +compat_do_ip6t_set_ctl_3184 compat_do_ip6t_set_ctl 4 3184 NULL
103703 +wait_table_bits_3187 wait_table_bits 1 3187 NULL
103704 +mempool_create_node_3191 mempool_create_node 1-6 3191 NULL
103705 +alloc_context_3194 alloc_context 1 3194 NULL
103706 +shmem_pread_slow_3198 shmem_pread_slow 3-2 3198 NULL
103707 +codec_reg_write_file_3204 codec_reg_write_file 3 3204 NULL
103708 +SyS_sendto_3219 SyS_sendto 6 3219 NULL
103709 +btrfs_prealloc_file_range_3227 btrfs_prealloc_file_range 3 3227 NULL
103710 +btrfs_next_leaf_3232 btrfs_next_leaf 0 3232 NULL
103711 +kimage_crash_alloc_3233 kimage_crash_alloc 3 3233 NULL
103712 +write_adapter_mem_3234 write_adapter_mem 3 3234 NULL
103713 +do_read_log_to_user_3236 do_read_log_to_user 4 3236 NULL
103714 +ext3_xattr_find_entry_3237 ext3_xattr_find_entry 0 3237 NULL
103715 +key_key_read_3241 key_key_read 3 3241 NULL
103716 +mmap_piobufs_3244 mmap_piobufs 4 3244 NULL
103717 +did_overwrite_first_ref_3259 did_overwrite_first_ref 0 3259 NULL
103718 +__ilog2_u64_3284 __ilog2_u64 0 3284 NULL
103719 +__iovec_copy_from_user_inatomic_3314 __iovec_copy_from_user_inatomic 0-4-3 3314 NULL
103720 +dbDiscardAG_3322 dbDiscardAG 3 3322 NULL
103721 +compat_sys_setsockopt_3326 compat_sys_setsockopt 5 3326 NULL
103722 +read_from_oldmem_3337 read_from_oldmem 2 3337 NULL
103723 +sysfs_create_group_3339 sysfs_create_group 0 3339 NULL
103724 +tty_port_register_device_attr_3341 tty_port_register_device_attr 3 3341 NULL
103725 +il_dbgfs_interrupt_read_3351 il_dbgfs_interrupt_read 3 3351 NULL
103726 +gsm_control_rls_3353 gsm_control_rls 3 3353 NULL
103727 +scnprintf_3360 scnprintf 0-2 3360 NULL
103728 +ReadByteAmd7930_3365 ReadByteAmd7930 0 3365 NULL
103729 +sr_read_3366 sr_read 3 3366 NULL
103730 +count_inode_refs_3375 count_inode_refs 0 3375 NULL
103731 +x86_emulate_instruction_3389 x86_emulate_instruction 2 3389 NULL
103732 +mtdchar_writeoob_3393 mtdchar_writeoob 4 3393 NULL
103733 +send_stream_3397 send_stream 4 3397 NULL
103734 +isdn_readbchan_3401 isdn_readbchan 0-5 3401 NULL
103735 +mei_io_cb_alloc_resp_buf_3414 mei_io_cb_alloc_resp_buf 2 3414 NULL
103736 +zone_to_nid_3415 zone_to_nid 0 3415 NULL
103737 +pci_add_cap_save_buffer_3426 pci_add_cap_save_buffer 3 3426 NULL
103738 +crystalhd_create_dio_pool_3427 crystalhd_create_dio_pool 2 3427 NULL
103739 +SyS_msgsnd_3436 SyS_msgsnd 3 3436 NULL
103740 +send_unlink_3438 send_unlink 0 3438 NULL
103741 +pipe_iov_copy_to_user_3447 pipe_iov_copy_to_user 3 3447 NULL
103742 +map_single_3449 map_single 0 3449 NULL
103743 +softsynth_write_3455 softsynth_write 3 3455 NULL
103744 +jffs2_acl_setxattr_3464 jffs2_acl_setxattr 4 3464 NULL nohasharray
103745 +snd_pcm_lib_readv_transfer_3464 snd_pcm_lib_readv_transfer 5-4-2 3464 &jffs2_acl_setxattr_3464
103746 +security_context_to_sid_default_3492 security_context_to_sid_default 2 3492 NULL
103747 +xfrm_migrate_msgsize_3496 xfrm_migrate_msgsize 1 3496 NULL
103748 +run_one_delayed_ref_3503 run_one_delayed_ref 0 3503 NULL nohasharray
103749 +kvm_handle_bad_page_3503 kvm_handle_bad_page 2 3503 &run_one_delayed_ref_3503
103750 +mem_tx_free_mem_blks_read_3521 mem_tx_free_mem_blks_read 3 3521 NULL
103751 +SyS_semtimedop_3532 SyS_semtimedop 3 3532 NULL
103752 +SyS_readv_3539 SyS_readv 3 3539 NULL
103753 +btrfs_dir_name_len_3549 btrfs_dir_name_len 0 3549 NULL
103754 +iommu_map_3554 iommu_map 3-2 3554 NULL
103755 +btrfs_delayed_update_inode_3557 btrfs_delayed_update_inode 0 3557 NULL
103756 +i915_gem_execbuffer_reserve_3558 i915_gem_execbuffer_reserve 0 3558 NULL
103757 +alloc_smp_resp_3566 alloc_smp_resp 1 3566 NULL
103758 +evtchn_read_3569 evtchn_read 3 3569 NULL
103759 +ll_track_ppid_seq_write_3582 ll_track_ppid_seq_write 3 3582 NULL
103760 +vc_resize_3585 vc_resize 3-2 3585 NULL
103761 +kvm_mmu_notifier_change_pte_3596 kvm_mmu_notifier_change_pte 3 3596 NULL
103762 +sctp_getsockopt_events_3607 sctp_getsockopt_events 2 3607 NULL
103763 +edac_mc_alloc_3611 edac_mc_alloc 4 3611 NULL
103764 +tx_tx_starts_read_3617 tx_tx_starts_read 3 3617 NULL
103765 +aligned_kmalloc_3628 aligned_kmalloc 1 3628 NULL
103766 +x86_swiotlb_alloc_coherent_3649 x86_swiotlb_alloc_coherent 2 3649 NULL
103767 +ath6kl_disconnect_timeout_read_3650 ath6kl_disconnect_timeout_read 3 3650 NULL
103768 +i915_compat_ioctl_3656 i915_compat_ioctl 2 3656 NULL
103769 +replace_pin_at_irq_node_3687 replace_pin_at_irq_node 2 3687 NULL
103770 +ntfs_attr_make_non_resident_3694 ntfs_attr_make_non_resident 0-2 3694 NULL
103771 +snd_m3_assp_read_3703 snd_m3_assp_read 0 3703 NULL nohasharray
103772 +create_irq_3703 create_irq 0 3703 &snd_m3_assp_read_3703
103773 +videobuf_pages_to_sg_3708 videobuf_pages_to_sg 2 3708 NULL
103774 +ci_ll_write_3740 ci_ll_write 4 3740 NULL nohasharray
103775 +ath6kl_mgmt_tx_3740 ath6kl_mgmt_tx 7 3740 &ci_ll_write_3740
103776 +uar_index2pfn_3741 uar_index2pfn 0-2 3741 NULL
103777 +sctp_setsockopt_auth_key_3793 sctp_setsockopt_auth_key 3 3793 NULL
103778 +btrfs_alloc_chunk_3808 btrfs_alloc_chunk 0 3808 NULL
103779 +ncp_file_write_3813 ncp_file_write 3 3813 NULL
103780 +llc_ui_recvmsg_3826 llc_ui_recvmsg 4 3826 NULL
103781 +btrfs_uuid_iter_rem_3831 btrfs_uuid_iter_rem 0 3831 NULL
103782 +hfsplus_direct_IO_3835 hfsplus_direct_IO 4 3835 NULL
103783 +stringify_nodemap_3842 stringify_nodemap 2 3842 NULL
103784 +ubi_eba_read_leb_3847 ubi_eba_read_leb 0 3847 NULL
103785 +create_one_cdev_3852 create_one_cdev 2 3852 NULL
103786 +smk_read_onlycap_3855 smk_read_onlycap 3 3855 NULL
103787 +get_fd_set_3866 get_fd_set 1 3866 NULL
103788 +apei_res_sub_3873 apei_res_sub 0 3873 NULL
103789 +garp_attr_create_3883 garp_attr_create 3 3883 NULL
103790 +efivarfs_file_read_3893 efivarfs_file_read 3 3893 NULL
103791 +nvram_write_3894 nvram_write 3 3894 NULL
103792 +pipeline_pre_proc_swi_read_3898 pipeline_pre_proc_swi_read 3 3898 NULL
103793 +comedi_buf_read_n_available_3899 comedi_buf_read_n_available 0 3899 NULL
103794 +vcs_write_3910 vcs_write 3 3910 NULL
103795 +SyS_move_pages_3920 SyS_move_pages 2 3920 NULL
103796 +hdlc_irq_one_3944 hdlc_irq_one 2 3944 NULL
103797 +brcmf_debugfs_fws_stats_read_3947 brcmf_debugfs_fws_stats_read 3 3947 NULL
103798 +mite_bytes_written_to_memory_lb_3987 mite_bytes_written_to_memory_lb 0 3987 NULL
103799 +do_add_counters_3992 do_add_counters 3 3992 NULL
103800 +obd_alloc_memmd_4002 obd_alloc_memmd 0 4002 NULL
103801 +userspace_status_4004 userspace_status 4 4004 NULL
103802 +mei_write_4005 mei_write 3 4005 NULL nohasharray
103803 +xfs_check_block_4005 xfs_check_block 4 4005 &mei_write_4005
103804 +snd_hdsp_capture_copy_4011 snd_hdsp_capture_copy 5 4011 NULL
103805 +mm_populate_4016 mm_populate 1-2 4016 NULL nohasharray
103806 +i915_gem_object_unbind_4016 i915_gem_object_unbind 0 4016 &mm_populate_4016
103807 +blk_end_request_4024 blk_end_request 3 4024 NULL
103808 +ext4_xattr_find_entry_4025 ext4_xattr_find_entry 0 4025 NULL
103809 +mtip_hw_read_registers_4037 mtip_hw_read_registers 3 4037 NULL
103810 +i915_gpu_idle_4062 i915_gpu_idle 0 4062 NULL
103811 +read_file_queues_4078 read_file_queues 3 4078 NULL
103812 +fbcon_do_set_font_4079 fbcon_do_set_font 2-3 4079 NULL
103813 +btrfs_inc_ref_4084 btrfs_inc_ref 0 4084 NULL
103814 +da9052_free_irq_4090 da9052_free_irq 2 4090 NULL
103815 +tm6000_read_4151 tm6000_read 3 4151 NULL
103816 +mpt_raid_phys_disk_get_num_paths_4155 mpt_raid_phys_disk_get_num_paths 0 4155 NULL
103817 +msg_bits_4158 msg_bits 0-3-4 4158 NULL
103818 +get_alua_req_4166 get_alua_req 3 4166 NULL
103819 +blk_dropped_read_4168 blk_dropped_read 3 4168 NULL
103820 +read_file_bool_4180 read_file_bool 3 4180 NULL
103821 +ocfs2_find_cpos_for_right_leaf_4194 ocfs2_find_cpos_for_right_leaf 0 4194 NULL
103822 +f1x_determine_channel_4202 f1x_determine_channel 2 4202 NULL
103823 +_osd_req_list_objects_4204 _osd_req_list_objects 6 4204 NULL
103824 +__snd_gf1_read_addr_4210 __snd_gf1_read_addr 0 4210 NULL
103825 +ath6kl_force_roam_write_4282 ath6kl_force_roam_write 3 4282 NULL
103826 +goldfish_audio_write_4284 goldfish_audio_write 3 4284 NULL
103827 +msb_do_write_request_4286 msb_do_write_request 2 4286 NULL
103828 +paging32_page_fault_4288 paging32_page_fault 2 4288 NULL
103829 +xt_compat_add_offset_4289 xt_compat_add_offset 0 4289 NULL
103830 +__usbnet_read_cmd_4299 __usbnet_read_cmd 7 4299 NULL
103831 +dvb_ringbuffer_pkt_read_user_4303 dvb_ringbuffer_pkt_read_user 3-2-5 4303 NULL
103832 +__pool_find_4308 __pool_find 3 4308 NULL
103833 +count_strings_4315 count_strings 0 4315 NULL
103834 +nouveau_fifo_create__4327 nouveau_fifo_create_ 5-6 4327 NULL
103835 +snd_rawmidi_kernel_read_4328 snd_rawmidi_kernel_read 3 4328 NULL
103836 +kvm_apic_get_reg_4354 kvm_apic_get_reg 0 4354 NULL
103837 +__copy_from_user_inatomic_4365 __copy_from_user_inatomic 0-3 4365 NULL nohasharray
103838 +lookup_string_4365 lookup_string 0 4365 &__copy_from_user_inatomic_4365
103839 +btrfs_set_disk_extent_flags_4374 btrfs_set_disk_extent_flags 0 4374 NULL
103840 +irda_sendmsg_4388 irda_sendmsg 4 4388 NULL
103841 +access_process_vm_4412 access_process_vm 0-4-2 4412 NULL nohasharray
103842 +cxacru_cm_get_array_4412 cxacru_cm_get_array 4 4412 &access_process_vm_4412
103843 +libfc_vport_create_4415 libfc_vport_create 2 4415 NULL
103844 +rtw_android_get_rssi_4421 rtw_android_get_rssi 0 4421 NULL
103845 +do_pages_stat_4437 do_pages_stat 2 4437 NULL
103846 +memparse_4444 memparse 0 4444 NULL
103847 +tcp_dma_try_early_copy_4457 tcp_dma_try_early_copy 3 4457 NULL
103848 +at76_set_card_command_4471 at76_set_card_command 4 4471 NULL
103849 +snd_seq_expand_var_event_4481 snd_seq_expand_var_event 5-0 4481 NULL
103850 +ocfs2_grow_tree_4492 ocfs2_grow_tree 0 4492 NULL
103851 +vmbus_establish_gpadl_4495 vmbus_establish_gpadl 3 4495 NULL
103852 +set_link_security_4502 set_link_security 4 4502 NULL
103853 +dm_cache_remove_mapping_4513 dm_cache_remove_mapping 2 4513 NULL
103854 +__gfn_to_pfn_memslot_4530 __gfn_to_pfn_memslot 2-0 4530 NULL
103855 +ll_max_readahead_per_file_mb_seq_write_4531 ll_max_readahead_per_file_mb_seq_write 3 4531 NULL
103856 +da9052_group_write_4534 da9052_group_write 2-3 4534 NULL
103857 +tty_register_device_4544 tty_register_device 2 4544 NULL
103858 +videobuf_vmalloc_to_sg_4548 videobuf_vmalloc_to_sg 2 4548 NULL
103859 +btrfs_file_extent_inline_item_len_4575 btrfs_file_extent_inline_item_len 0 4575 NULL
103860 +xfs_buf_get_maps_4581 xfs_buf_get_maps 2 4581 NULL
103861 +iommu_map_page_4588 iommu_map_page 5 4588 NULL
103862 +bch_alloc_4593 bch_alloc 1 4593 NULL
103863 +ocfs2_refcount_lock_4595 ocfs2_refcount_lock 0 4595 NULL
103864 +__wb_force_remove_mapping_4622 __wb_force_remove_mapping 2 4622 NULL
103865 +ll_rw_extents_stats_seq_write_4633 ll_rw_extents_stats_seq_write 3 4633 NULL
103866 +iwl_dbgfs_tx_queue_read_4635 iwl_dbgfs_tx_queue_read 3 4635 NULL
103867 +ext3_orphan_add_4665 ext3_orphan_add 0 4665 NULL
103868 +skb_add_data_nocache_4682 skb_add_data_nocache 4 4682 NULL
103869 +cx18_read_pos_4683 cx18_read_pos 3 4683 NULL
103870 +short_retry_limit_read_4687 short_retry_limit_read 3 4687 NULL
103871 +kone_receive_4690 kone_receive 4 4690 NULL
103872 +link_to_fixup_dir_4699 link_to_fixup_dir 0 4699 NULL
103873 +cxgbi_alloc_big_mem_4707 cxgbi_alloc_big_mem 1 4707 NULL
103874 +konepure_sysfs_read_4709 konepure_sysfs_read 6 4709 NULL
103875 +ati_create_gatt_pages_4722 ati_create_gatt_pages 1 4722 NULL nohasharray
103876 +show_header_4722 show_header 3 4722 &ati_create_gatt_pages_4722
103877 +gfs2_bit_search_4728 gfs2_bit_search 0-2 4728 NULL
103878 +ll_rw_offset_stats_seq_write_4736 ll_rw_offset_stats_seq_write 3 4736 NULL nohasharray
103879 +bitmap_startwrite_4736 bitmap_startwrite 2 4736 &ll_rw_offset_stats_seq_write_4736
103880 +__find_free_cblock_4741 __find_free_cblock 2 4741 NULL
103881 +lu_buf_alloc_4753 lu_buf_alloc 2 4753 NULL
103882 +find_next_best_node_4774 find_next_best_node 1-0 4774 NULL nohasharray
103883 +pwr_rcvd_bcns_cnt_read_4774 pwr_rcvd_bcns_cnt_read 3 4774 &find_next_best_node_4774
103884 +btrfs_del_items_4791 btrfs_del_items 0 4791 NULL nohasharray
103885 +create_subvol_4791 create_subvol 4 4791 &btrfs_del_items_4791
103886 +ncp__vol2io_4804 ncp__vol2io 5 4804 NULL
103887 +repair_io_failure_4815 repair_io_failure 4-3 4815 NULL
103888 +comedi_buf_write_free_4847 comedi_buf_write_free 2 4847 NULL
103889 +gigaset_if_receive_4861 gigaset_if_receive 3 4861 NULL
103890 +key_tx_spec_read_4862 key_tx_spec_read 3 4862 NULL
103891 +get_new_location_4866 get_new_location 0 4866 NULL
103892 +ocfs2_defrag_extent_4873 ocfs2_defrag_extent 3-2 4873 NULL
103893 +hid_register_field_4874 hid_register_field 2-3 4874 NULL
103894 +vga_arb_read_4886 vga_arb_read 3 4886 NULL
103895 +ntfs_rl_insert_4931 ntfs_rl_insert 4-2 4931 NULL
103896 +ieee80211_if_fmt_ave_beacon_4941 ieee80211_if_fmt_ave_beacon 3 4941 NULL
103897 +ocfs2_should_refresh_lock_res_4958 ocfs2_should_refresh_lock_res 0 4958 NULL
103898 +compat_rawv6_setsockopt_4967 compat_rawv6_setsockopt 5 4967 NULL
103899 +skb_network_header_len_4971 skb_network_header_len 0 4971 NULL
103900 +ieee80211_if_fmt_dot11MeshHWMPconfirmationInterval_4976 ieee80211_if_fmt_dot11MeshHWMPconfirmationInterval 3 4976 NULL
103901 +do_sync_read_4977 do_sync_read 0 4977 NULL
103902 +vmw_surface_define_size_4993 vmw_surface_define_size 0 4993 NULL
103903 +compat_SyS_ipc_5000 compat_SyS_ipc 3 5000 NULL
103904 +do_mincore_5018 do_mincore 0-2-1 5018 NULL
103905 +btrfs_punch_hole_5041 btrfs_punch_hole 2 5041 NULL
103906 +cfg80211_rx_mgmt_5056 cfg80211_rx_mgmt 5 5056 NULL
103907 +ocfs2_check_range_for_holes_5066 ocfs2_check_range_for_holes 3-2 5066 NULL
103908 +snd_mixart_BA1_read_5082 snd_mixart_BA1_read 5 5082 NULL
103909 +snd_emu10k1_ptr20_read_5087 snd_emu10k1_ptr20_read 0 5087 NULL
103910 +get_random_bytes_5091 get_random_bytes 2 5091 NULL nohasharray
103911 +kfifo_copy_from_user_5091 kfifo_copy_from_user 4-3-0 5091 &get_random_bytes_5091 nohasharray
103912 +blk_rq_sectors_5091 blk_rq_sectors 0 5091 &kfifo_copy_from_user_5091
103913 +sound_write_5102 sound_write 3 5102 NULL
103914 +clear_dirty_5105 clear_dirty 3 5105 NULL
103915 +i40e_dbg_netdev_ops_write_5117 i40e_dbg_netdev_ops_write 3 5117 NULL
103916 +qib_7220_handle_hwerrors_5142 qib_7220_handle_hwerrors 3 5142 NULL
103917 +ufs_add_fragments_5144 ufs_add_fragments 2 5144 NULL
103918 +ocfs2_inode_lock_full_nested_5148 ocfs2_inode_lock_full_nested 0 5148 NULL
103919 +__uwb_addr_print_5161 __uwb_addr_print 2 5161 NULL
103920 +iwl_dbgfs_status_read_5171 iwl_dbgfs_status_read 3 5171 NULL
103921 +acpi_pcc_get_sqty_5176 acpi_pcc_get_sqty 0 5176 NULL
103922 +ppp_cp_parse_cr_5214 ppp_cp_parse_cr 4 5214 NULL nohasharray
103923 +r600_mip_minify_5214 r600_mip_minify 2-1-0 5214 &ppp_cp_parse_cr_5214
103924 +dwc2_hcd_urb_alloc_5217 dwc2_hcd_urb_alloc 2 5217 NULL
103925 +ath6kl_debug_roam_tbl_event_5224 ath6kl_debug_roam_tbl_event 3 5224 NULL
103926 +nouveau_fb_create__5244 nouveau_fb_create_ 5 5244 NULL
103927 +gfn_to_hva_memslot_5265 gfn_to_hva_memslot 2 5265 NULL
103928 +btrfs_alloc_reserved_file_extent_5274 btrfs_alloc_reserved_file_extent 0 5274 NULL
103929 +iommu_domain_identity_map_5284 iommu_domain_identity_map 2-3 5284 NULL
103930 +alloc_cache_blocks_with_hash_5285 alloc_cache_blocks_with_hash 2 5285 NULL
103931 +usb_descriptor_fillbuf_5302 usb_descriptor_fillbuf 0 5302 NULL
103932 +__gfn_to_hva_memslot_5304 __gfn_to_hva_memslot 0-2 5304 NULL
103933 +r592_write_fifo_pio_5315 r592_write_fifo_pio 3 5315 NULL
103934 +sbc_get_write_same_sectors_5317 sbc_get_write_same_sectors 0 5317 NULL
103935 +pwr_elp_enter_read_5324 pwr_elp_enter_read 3 5324 NULL
103936 +allocate_cnodes_5329 allocate_cnodes 1 5329 NULL
103937 +vm_insert_pfn_5341 vm_insert_pfn 3-0 5341 NULL
103938 +cq_free_res_5355 cq_free_res 5 5355 NULL
103939 +ps_pspoll_utilization_read_5361 ps_pspoll_utilization_read 3 5361 NULL
103940 +cciss_allocate_sg_chain_blocks_5368 cciss_allocate_sg_chain_blocks 2-3 5368 NULL
103941 +kvm_pin_pages_5369 kvm_pin_pages 2-0 5369 NULL
103942 +bitmap_fold_5396 bitmap_fold 4 5396 NULL
103943 +security_inode_init_security_5408 security_inode_init_security 0 5408 NULL
103944 +__resolve_indirect_refs_5409 __resolve_indirect_refs 0 5409 NULL
103945 +nilfs_palloc_entries_per_group_5418 nilfs_palloc_entries_per_group 0 5418 NULL
103946 +check_item_in_log_5440 check_item_in_log 0 5440 NULL
103947 +__split_bvec_across_targets_5454 __split_bvec_across_targets 3 5454 NULL
103948 +xfs_efd_init_5463 xfs_efd_init 3 5463 NULL
103949 +xfs_efi_init_5476 xfs_efi_init 2 5476 NULL
103950 +ubi_leb_write_5478 ubi_leb_write 4-5 5478 NULL
103951 +cifs_security_flags_proc_write_5484 cifs_security_flags_proc_write 3 5484 NULL
103952 +tty_write_5494 tty_write 3 5494 NULL
103953 +tomoyo_update_domain_5498 tomoyo_update_domain 2 5498 NULL nohasharray
103954 +ieee80211_if_fmt_last_beacon_5498 ieee80211_if_fmt_last_beacon 3 5498 &tomoyo_update_domain_5498
103955 +__max_nr_grant_frames_5505 __max_nr_grant_frames 0 5505 NULL
103956 +ieee80211_if_fmt_auto_open_plinks_5534 ieee80211_if_fmt_auto_open_plinks 3 5534 NULL
103957 +iommu_prepare_identity_map_5540 iommu_prepare_identity_map 2-3 5540 NULL
103958 +get_entry_msg_len_5552 get_entry_msg_len 0 5552 NULL
103959 +le_readq_5557 le_readq 0 5557 NULL
103960 +inw_5558 inw 0 5558 NULL
103961 +bioset_create_5580 bioset_create 1 5580 NULL
103962 +domain_sg_mapping_5586 domain_sg_mapping 4 5586 NULL
103963 +oz_ep_alloc_5587 oz_ep_alloc 1 5587 NULL
103964 +ldm_frag_add_5611 ldm_frag_add 2 5611 NULL
103965 +compat_copy_entries_5617 compat_copy_entries 0 5617 NULL
103966 +SYSC_init_module_5626 SYSC_init_module 2 5626 NULL
103967 +iterate_extent_inodes_5631 iterate_extent_inodes 0 5631 NULL
103968 +SYSC_fsetxattr_5639 SYSC_fsetxattr 4 5639 NULL
103969 +ext4_xattr_get_5661 ext4_xattr_get 0 5661 NULL
103970 +posix_clock_register_5662 posix_clock_register 2 5662 NULL
103971 +kgdb_arch_pc_5673 kgdb_arch_pc 0 5673 NULL
103972 +wb_clear_dirty_5684 wb_clear_dirty 2 5684 NULL
103973 +get_arg_5694 get_arg 3 5694 NULL
103974 +subbuf_read_actor_5708 subbuf_read_actor 3 5708 NULL
103975 +ntfs_attr_record_resize_5720 ntfs_attr_record_resize 0 5720 NULL
103976 +vmw_kms_readback_5727 vmw_kms_readback 6 5727 NULL
103977 +rts51x_transfer_data_partial_5735 rts51x_transfer_data_partial 6 5735 NULL
103978 +ubi_calc_fm_size_5749 ubi_calc_fm_size 0 5749 NULL
103979 +sctp_setsockopt_autoclose_5775 sctp_setsockopt_autoclose 3 5775 NULL
103980 +__vxge_hw_blockpool_malloc_5786 __vxge_hw_blockpool_malloc 2 5786 NULL
103981 +nvme_trans_bdev_char_page_5797 nvme_trans_bdev_char_page 3 5797 NULL
103982 +skb_copy_datagram_iovec_5806 skb_copy_datagram_iovec 2-4 5806 NULL
103983 +nv50_disp_pioc_create__5812 nv50_disp_pioc_create_ 5 5812 NULL
103984 +ceph_x_encrypt_buflen_5829 ceph_x_encrypt_buflen 0-1 5829 NULL
103985 +ceph_msg_new_5846 ceph_msg_new 2 5846 NULL
103986 +setup_req_5848 setup_req 3 5848 NULL
103987 +ria_page_count_5849 ria_page_count 0 5849 NULL
103988 +rx_filter_max_arp_queue_dep_read_5851 rx_filter_max_arp_queue_dep_read 3 5851 NULL
103989 +config_buf_5862 config_buf 0 5862 NULL
103990 +ext4_ext_correct_indexes_5865 ext4_ext_correct_indexes 0 5865 NULL
103991 +paging64_walk_addr_5887 paging64_walk_addr 3 5887 NULL
103992 +scan_bitmap_5888 scan_bitmap 3 5888 NULL
103993 +lprocfs_fid_width_seq_write_5889 lprocfs_fid_width_seq_write 3 5889 NULL
103994 +port_show_regs_5904 port_show_regs 3 5904 NULL
103995 +rbd_segment_length_5907 rbd_segment_length 0-3-2 5907 NULL
103996 +uhci_debug_read_5911 uhci_debug_read 3 5911 NULL
103997 +lbs_highsnr_read_5931 lbs_highsnr_read 3 5931 NULL
103998 +ps_poll_ps_poll_timeouts_read_5934 ps_poll_ps_poll_timeouts_read 3 5934 NULL
103999 +edac_device_alloc_ctl_info_5941 edac_device_alloc_ctl_info 1 5941 NULL
104000 +find_parent_nodes_5948 find_parent_nodes 0 5948 NULL
104001 +ll_statahead_one_5962 ll_statahead_one 3 5962 NULL
104002 +__apu_get_register_5967 __apu_get_register 0 5967 NULL
104003 +ieee80211_if_fmt_rc_rateidx_mask_5ghz_5971 ieee80211_if_fmt_rc_rateidx_mask_5ghz 3 5971 NULL
104004 +native_pte_val_5978 native_pte_val 0 5978 NULL
104005 +jbd2_journal_stop_5979 jbd2_journal_stop 0 5979 NULL
104006 +ntfs_rl_append_6037 ntfs_rl_append 4-2 6037 NULL
104007 +da9052_request_irq_6058 da9052_request_irq 2 6058 NULL nohasharray
104008 +device_add_attributes_6058 device_add_attributes 0 6058 &da9052_request_irq_6058
104009 +alloc_msg_6072 alloc_msg 1 6072 NULL
104010 +sctp_setsockopt_connectx_6073 sctp_setsockopt_connectx 3 6073 NULL
104011 +rts51x_ms_rw_multi_sector_6076 rts51x_ms_rw_multi_sector 3-4 6076 NULL
104012 +md_trim_bio_6078 md_trim_bio 2 6078 NULL
104013 +finish_inode_if_needed_6098 finish_inode_if_needed 0 6098 NULL
104014 +ipmi_addr_length_6110 ipmi_addr_length 0 6110 NULL
104015 +dfs_global_file_write_6112 dfs_global_file_write 3 6112 NULL
104016 +nouveau_parent_create__6131 nouveau_parent_create_ 7 6131 NULL
104017 +__btrfs_commit_inode_delayed_items_6150 __btrfs_commit_inode_delayed_items 0 6150 NULL
104018 +ieee80211_if_fmt_beacon_timeout_6153 ieee80211_if_fmt_beacon_timeout 3 6153 NULL
104019 +ivtv_copy_buf_to_user_6159 ivtv_copy_buf_to_user 4 6159 NULL
104020 +vdma_mem_alloc_6171 vdma_mem_alloc 1 6171 NULL
104021 +wl1251_cmd_template_set_6172 wl1251_cmd_template_set 4 6172 NULL
104022 +paging64_walk_addr_generic_6180 paging64_walk_addr_generic 4 6180 NULL
104023 +SyS_setgroups_6182 SyS_setgroups 1 6182 NULL
104024 +__add_pin_to_irq_node_6188 __add_pin_to_irq_node 2 6188 NULL
104025 +qp_host_get_user_memory_6189 qp_host_get_user_memory 1-2 6189 NULL
104026 +cl_sync_io_wait_6196 cl_sync_io_wait 0 6196 NULL
104027 +i915_gem_execbuffer_move_to_gpu_6197 i915_gem_execbuffer_move_to_gpu 0 6197 NULL
104028 +mxt_show_instance_6207 mxt_show_instance 2-0 6207 NULL
104029 +v4l2_ctrl_new_std_menu_6221 v4l2_ctrl_new_std_menu 4 6221 NULL
104030 +mqueue_read_file_6228 mqueue_read_file 3 6228 NULL
104031 +f_hidg_read_6238 f_hidg_read 3 6238 NULL
104032 +changed_ref_6242 changed_ref 0 6242 NULL
104033 +fbcon_prepare_logo_6246 fbcon_prepare_logo 5 6246 NULL
104034 +ext4_ext_split_6249 ext4_ext_split 0 6249 NULL
104035 +pcpu_next_pop_6277 pcpu_next_pop 4 6277 NULL
104036 +tx_tx_start_null_frame_read_6281 tx_tx_start_null_frame_read 3 6281 NULL
104037 +snd_hda_override_conn_list_6282 snd_hda_override_conn_list 3-0 6282 NULL nohasharray
104038 +xenbus_file_write_6282 xenbus_file_write 3 6282 &snd_hda_override_conn_list_6282
104039 +posix_acl_fix_xattr_to_user_6283 posix_acl_fix_xattr_to_user 2 6283 NULL
104040 +paging64_gva_to_gpa_nested_6287 paging64_gva_to_gpa_nested 2 6287 NULL
104041 +qlcnic_sriov_alloc_bc_msg_6309 qlcnic_sriov_alloc_bc_msg 2 6309 NULL
104042 +exclude_super_stripes_6326 exclude_super_stripes 0 6326 NULL
104043 +SyS_mincore_6329 SyS_mincore 2-1 6329 NULL nohasharray
104044 +hfa384x_inw_6329 hfa384x_inw 0 6329 &SyS_mincore_6329
104045 +fuse_get_req_for_background_6337 fuse_get_req_for_background 2 6337 NULL
104046 +ucs2_strnlen_6342 ucs2_strnlen 0 6342 NULL
104047 +utc2ntfs_6347 utc2ntfs 0 6347 NULL
104048 +regcache_sync_block_raw_6350 regcache_sync_block_raw 5-4 6350 NULL
104049 +mei_dbgfs_read_devstate_6352 mei_dbgfs_read_devstate 3 6352 NULL
104050 +_proc_do_string_6376 _proc_do_string 2 6376 NULL
104051 +osd_req_read_sg_kern_6378 osd_req_read_sg_kern 5 6378 NULL
104052 +msb_write_block_6379 msb_write_block 3 6379 NULL
104053 +BcmFlash2xBulkRead_6395 BcmFlash2xBulkRead 0 6395 NULL
104054 +posix_acl_fix_xattr_userns_6420 posix_acl_fix_xattr_userns 4 6420 NULL
104055 +add_transaction_credits_6422 add_transaction_credits 2-3 6422 NULL
104056 +ipr_change_queue_depth_6431 ipr_change_queue_depth 2 6431 NULL
104057 +__alloc_bootmem_node_nopanic_6432 __alloc_bootmem_node_nopanic 2 6432 NULL
104058 +add_to_list_6433 add_to_list 0 6433 NULL
104059 +paging32_gva_to_gpa_nested_6442 paging32_gva_to_gpa_nested 2 6442 NULL
104060 +i915_gem_object_wait_rendering_6446 i915_gem_object_wait_rendering 0 6446 NULL
104061 +mlx4_ib_reg_user_mr_6471 mlx4_ib_reg_user_mr 2-3 6471 NULL
104062 +ieee80211_if_fmt_dot11MeshMaxRetries_6476 ieee80211_if_fmt_dot11MeshMaxRetries 3 6476 NULL
104063 +qp_memcpy_from_queue_6479 qp_memcpy_from_queue 5-4 6479 NULL
104064 +cipso_v4_map_lvl_hton_6490 cipso_v4_map_lvl_hton 0 6490 NULL
104065 +ntfs_cluster_free_6497 ntfs_cluster_free 0 6497 NULL
104066 +dbg_intr_buf_6501 dbg_intr_buf 2 6501 NULL
104067 +mei_read_6507 mei_read 3 6507 NULL
104068 +__start_delalloc_inodes_6528 __start_delalloc_inodes 0 6528 NULL
104069 +rndis_set_oid_6547 rndis_set_oid 4 6547 NULL
104070 +wdm_read_6549 wdm_read 3 6549 NULL
104071 +dm_stats_create_6551 dm_stats_create 4-2-3 6551 NULL
104072 +fb_alloc_cmap_6554 fb_alloc_cmap 2 6554 NULL
104073 +SyS_semtimedop_6563 SyS_semtimedop 3 6563 NULL
104074 +SyS_fcntl64_6582 SyS_fcntl64 3 6582 NULL
104075 +snd_pcm_hw_refine_old_user_6586 snd_pcm_hw_refine_old_user 0 6586 NULL
104076 +btrfs_start_all_delalloc_inodes_6596 btrfs_start_all_delalloc_inodes 0 6596 NULL
104077 +snmp_mib_init_6604 snmp_mib_init 2-3 6604 NULL
104078 +ecryptfs_filldir_6622 ecryptfs_filldir 3 6622 NULL
104079 +xfs_do_div_6649 xfs_do_div 0-2 6649 NULL
104080 +journal_dirty_metadata_6658 journal_dirty_metadata 0 6658 NULL
104081 +process_rcvd_data_6679 process_rcvd_data 3 6679 NULL
104082 +btrfs_lookup_csums_range_6696 btrfs_lookup_csums_range 2-3-0 6696 NULL
104083 +ps_pspoll_max_apturn_read_6699 ps_pspoll_max_apturn_read 3 6699 NULL
104084 +bnad_debugfs_write_regrd_6706 bnad_debugfs_write_regrd 3 6706 NULL
104085 +mpeg_read_6708 mpeg_read 3 6708 NULL
104086 +ibmpex_query_sensor_count_6709 ibmpex_query_sensor_count 0 6709 NULL
104087 +set_orig_insn_6712 set_orig_insn 3 6712 NULL
104088 +video_proc_write_6724 video_proc_write 3 6724 NULL
104089 +posix_acl_xattr_count_6725 posix_acl_xattr_count 0-1 6725 NULL
104090 +ocfs2_insert_extent_6737 ocfs2_insert_extent 0 6737 NULL
104091 +btrfs_inode_delayed_dir_index_count_6759 btrfs_inode_delayed_dir_index_count 0 6759 NULL
104092 +kobject_add_varg_6781 kobject_add_varg 0 6781 NULL
104093 +iwl_dbgfs_channels_read_6784 iwl_dbgfs_channels_read 3 6784 NULL
104094 +ieee80211_if_read_6785 ieee80211_if_read 3 6785 NULL
104095 +zone_spanned_pages_in_node_6787 zone_spanned_pages_in_node 0-3-4 6787 NULL
104096 +hdlcdrv_register_6792 hdlcdrv_register 2 6792 NULL
104097 +tx_tx_done_data_read_6799 tx_tx_done_data_read 3 6799 NULL
104098 +ocfs2_calc_refcount_meta_credits_6802 ocfs2_calc_refcount_meta_credits 0 6802 NULL
104099 +lbs_rdrf_write_6826 lbs_rdrf_write 3 6826 NULL
104100 +make_8259A_irq_6828 make_8259A_irq 1 6828 NULL
104101 +calc_pages_for_6838 calc_pages_for 0-1-2 6838 NULL
104102 +mon_bin_read_6841 mon_bin_read 3 6841 NULL
104103 +snd_cs4281_BA0_read_6847 snd_cs4281_BA0_read 5 6847 NULL
104104 +ecryptfs_write_lower_6870 ecryptfs_write_lower 4 6870 NULL
104105 +dio_complete_6879 dio_complete 0-2-3 6879 NULL
104106 +ieee80211_if_fmt_path_refresh_time_6888 ieee80211_if_fmt_path_refresh_time 3 6888 NULL nohasharray
104107 +raw_seticmpfilter_6888 raw_seticmpfilter 3 6888 &ieee80211_if_fmt_path_refresh_time_6888
104108 +dlmfs_file_write_6892 dlmfs_file_write 3 6892 NULL
104109 +ext4_inode_bitmap_6902 ext4_inode_bitmap 0 6902 NULL
104110 +spi_show_regs_6911 spi_show_regs 3 6911 NULL nohasharray
104111 +proc_sessionid_read_6911 proc_sessionid_read 3 6911 &spi_show_regs_6911 nohasharray
104112 +acm_alloc_minor_6911 acm_alloc_minor 0 6911 &proc_sessionid_read_6911
104113 +__kfifo_dma_in_finish_r_6913 __kfifo_dma_in_finish_r 2-3 6913 NULL
104114 +do_msgrcv_6921 do_msgrcv 3 6921 NULL
104115 +__vxge_hw_mempool_create_6923 __vxge_hw_mempool_create 3-2-6 6923 NULL
104116 +cache_do_downcall_6926 cache_do_downcall 3 6926 NULL
104117 +ipath_verbs_send_dma_6929 ipath_verbs_send_dma 6 6929 NULL
104118 +qsfp_cks_6945 qsfp_cks 2-0 6945 NULL
104119 +tg3_nvram_write_block_unbuffered_6955 tg3_nvram_write_block_unbuffered 3 6955 NULL
104120 +pch_uart_hal_read_6961 pch_uart_hal_read 0 6961 NULL
104121 +videobuf_dma_init_kernel_6963 videobuf_dma_init_kernel 3 6963 NULL
104122 +i40e_dbg_dump_write_6973 i40e_dbg_dump_write 3 6973 NULL nohasharray
104123 +rsa_extract_mpi_6973 rsa_extract_mpi 5 6973 &i40e_dbg_dump_write_6973
104124 +crypto_authenc_esn_setkey_6985 crypto_authenc_esn_setkey 3 6985 NULL
104125 +request_key_async_6990 request_key_async 4 6990 NULL
104126 +tpl_write_6998 tpl_write 3 6998 NULL
104127 +r871x_set_wpa_ie_7000 r871x_set_wpa_ie 3 7000 NULL
104128 +log_wait_commit_7005 log_wait_commit 0 7005 NULL
104129 +cipso_v4_gentag_enum_7006 cipso_v4_gentag_enum 0 7006 NULL
104130 +tracing_cpumask_read_7010 tracing_cpumask_read 3 7010 NULL
104131 +ld_usb_write_7022 ld_usb_write 3 7022 NULL
104132 +wimax_msg_7030 wimax_msg 4 7030 NULL
104133 +ipath_get_base_info_7043 ipath_get_base_info 3 7043 NULL
104134 +snd_pcm_oss_bytes_7051 snd_pcm_oss_bytes 2 7051 NULL
104135 +hci_sock_recvmsg_7072 hci_sock_recvmsg 4 7072 NULL
104136 +event_enable_read_7074 event_enable_read 3 7074 NULL
104137 +ip_vs_sync_conn_7075 ip_vs_sync_conn 3 7075 NULL
104138 +beacon_interval_read_7091 beacon_interval_read 3 7091 NULL
104139 +pipeline_enc_rx_stat_fifo_int_read_7107 pipeline_enc_rx_stat_fifo_int_read 3 7107 NULL
104140 +check_header_7108 check_header 0 7108 NULL
104141 +osc_resend_count_seq_write_7120 osc_resend_count_seq_write 3 7120 NULL
104142 +qib_format_hwerrors_7133 qib_format_hwerrors 5 7133 NULL
104143 +kvm_mmu_notifier_test_young_7139 kvm_mmu_notifier_test_young 3 7139 NULL
104144 +ipv6_recv_rxpmtu_7142 ipv6_recv_rxpmtu 3 7142 NULL
104145 +qlcnic_enable_msix_7144 qlcnic_enable_msix 2 7144 NULL
104146 +ocfs2_get_left_path_7159 ocfs2_get_left_path 0 7159 NULL
104147 +core_alua_write_tpg_metadata_7168 core_alua_write_tpg_metadata 3 7168 NULL
104148 +ext3_xattr_ibody_list_7201 ext3_xattr_ibody_list 3-0 7201 NULL
104149 +__alloc_objio_seg_7203 __alloc_objio_seg 1 7203 NULL
104150 +get_param_h_7247 get_param_h 0 7247 NULL
104151 +af_alg_make_sg_7254 af_alg_make_sg 3-0 7254 NULL
104152 +hdlc_loop_7255 hdlc_loop 0 7255 NULL
104153 +vm_mmap_pgoff_7259 vm_mmap_pgoff 0 7259 NULL
104154 +snd_mask_refine_7267 snd_mask_refine 0 7267 NULL
104155 +f_midi_start_ep_7270 f_midi_start_ep 0 7270 NULL
104156 +dma_ops_alloc_addresses_7272 dma_ops_alloc_addresses 3-4-5-0 7272 NULL
104157 +rx_rate_rx_frames_per_rates_read_7282 rx_rate_rx_frames_per_rates_read 3 7282 NULL
104158 +get_string_7302 get_string 0 7302 NULL
104159 +wb_remove_mapping_7307 wb_remove_mapping 2 7307 NULL
104160 +wait_on_sync_kiocb_7327 wait_on_sync_kiocb 0 7327 NULL
104161 +mgmt_control_7349 mgmt_control 3 7349 NULL
104162 +max8998_irq_domain_map_7359 max8998_irq_domain_map 2 7359 NULL
104163 +i915_gem_do_execbuffer_7362 i915_gem_do_execbuffer 0 7362 NULL nohasharray
104164 +ext3_free_blocks_7362 ext3_free_blocks 3-4 7362 &i915_gem_do_execbuffer_7362
104165 +at_est2timeout_7365 at_est2timeout 0-1 7365 NULL
104166 +ieee80211_if_read_dot11MeshHWMPactivePathTimeout_7368 ieee80211_if_read_dot11MeshHWMPactivePathTimeout 3 7368 NULL
104167 +schedule_timeout_7371 schedule_timeout 0 7371 NULL
104168 +ath10k_read_fw_stats_7387 ath10k_read_fw_stats 3 7387 NULL
104169 +hweight_long_7388 hweight_long 1-0 7388 NULL
104170 +sl_change_mtu_7396 sl_change_mtu 2 7396 NULL
104171 +_ore_add_stripe_unit_7399 _ore_add_stripe_unit 6-3 7399 NULL
104172 +readb_7401 readb 0 7401 NULL
104173 +drm_property_create_blob_7414 drm_property_create_blob 2 7414 NULL
104174 +ip_options_get_alloc_7448 ip_options_get_alloc 1 7448 NULL
104175 +SYSC_setgroups_7454 SYSC_setgroups 1 7454 NULL
104176 +rt2x00debug_read_queue_stats_7455 rt2x00debug_read_queue_stats 3 7455 NULL
104177 +l2tp_ip6_sendmsg_7461 l2tp_ip6_sendmsg 4 7461 NULL
104178 +ReadHSCX_7471 ReadHSCX 0 7471 NULL nohasharray
104179 +garp_request_join_7471 garp_request_join 4 7471 &ReadHSCX_7471
104180 +snd_pcm_lib_read1_7491 snd_pcm_lib_read1 0-3 7491 NULL
104181 +iwl_mvm_power_dbgfs_read_7502 iwl_mvm_power_dbgfs_read 0 7502 NULL
104182 +sdhci_alloc_host_7509 sdhci_alloc_host 2 7509 NULL nohasharray
104183 +ahash_instance_headroom_7509 ahash_instance_headroom 0 7509 &sdhci_alloc_host_7509
104184 +array_zalloc_7519 array_zalloc 2-1 7519 NULL
104185 +btrfs_insert_xattr_item_7545 btrfs_insert_xattr_item 0 7545 NULL
104186 +goal_in_my_reservation_7553 goal_in_my_reservation 3 7553 NULL
104187 +smk_read_mapped_7562 smk_read_mapped 3 7562 NULL
104188 +cfs_cpt_num_estimate_7571 cfs_cpt_num_estimate 0 7571 NULL
104189 +ext4_ext_insert_extent_7576 ext4_ext_insert_extent 0 7576 NULL
104190 +btrfs_block_rsv_add_7579 btrfs_block_rsv_add 3-0 7579 NULL
104191 +ext3_try_to_allocate_7590 ext3_try_to_allocate 5-3-0 7590 NULL
104192 +ocfs2_lock_create_7612 ocfs2_lock_create 0 7612 NULL
104193 +create_dir_7614 create_dir 0 7614 NULL nohasharray
104194 +groups_alloc_7614 groups_alloc 1 7614 &create_dir_7614
104195 +_rtw_zmalloc_7636 _rtw_zmalloc 1 7636 NULL
104196 +fault_inject_write_7662 fault_inject_write 3 7662 NULL
104197 +acpi_ex_allocate_name_string_7685 acpi_ex_allocate_name_string 1-2 7685 NULL
104198 +acpi_ns_get_pathname_length_7699 acpi_ns_get_pathname_length 0 7699 NULL
104199 +dev_write_7708 dev_write 3 7708 NULL
104200 +unmap_region_7709 unmap_region 1 7709 NULL
104201 +dbg_check_cats_7713 dbg_check_cats 0 7713 NULL
104202 +pci_raw_set_power_state_7729 pci_raw_set_power_state 0 7729 NULL
104203 +vxge_device_register_7752 vxge_device_register 4 7752 NULL
104204 +ubi_io_read_vid_hdr_7766 ubi_io_read_vid_hdr 0 7766 NULL
104205 +paths_from_inode_7774 paths_from_inode 0 7774 NULL
104206 +alloc_candev_7776 alloc_candev 1-2 7776 NULL
104207 +dfs_global_file_read_7787 dfs_global_file_read 3 7787 NULL
104208 +bnx2_nvram_write_7790 bnx2_nvram_write 2-4 7790 NULL
104209 +diva_os_copy_from_user_7792 diva_os_copy_from_user 4 7792 NULL nohasharray
104210 +lustre_packed_msg_size_7792 lustre_packed_msg_size 0 7792 &diva_os_copy_from_user_7792
104211 +ubifs_leb_read_7828 ubifs_leb_read 0 7828 NULL
104212 +cfs_trace_dump_debug_buffer_usrstr_7861 cfs_trace_dump_debug_buffer_usrstr 2 7861 NULL
104213 +tipc_alloc_entry_7875 tipc_alloc_entry 2 7875 NULL
104214 +btrfs_find_space_for_alloc_7876 btrfs_find_space_for_alloc 2 7876 NULL
104215 +config_desc_7878 config_desc 0 7878 NULL
104216 +dvb_dmxdev_read_sec_7892 dvb_dmxdev_read_sec 4 7892 NULL
104217 +xfs_trans_get_efi_7898 xfs_trans_get_efi 2 7898 NULL
104218 +ext3_group_extend_7911 ext3_group_extend 3 7911 NULL
104219 +libfc_host_alloc_7917 libfc_host_alloc 2 7917 NULL
104220 +f_hidg_write_7932 f_hidg_write 3 7932 NULL
104221 +io_apic_setup_irq_pin_once_7934 io_apic_setup_irq_pin_once 1-2 7934 NULL
104222 +compare_refs_7938 compare_refs 0 7938 NULL
104223 +fsnotify_7943 fsnotify 0 7943 NULL
104224 +hash_netiface6_expire_7944 hash_netiface6_expire 3 7944 NULL
104225 +integrity_digsig_verify_7956 integrity_digsig_verify 3 7956 NULL
104226 +smk_write_load_self_7958 smk_write_load_self 3 7958 NULL
104227 +bch_btree_to_text_7960 bch_btree_to_text 2 7960 NULL
104228 +fixup_inode_link_count_8002 fixup_inode_link_count 0 8002 NULL
104229 +extend_or_restart_transaction_8008 extend_or_restart_transaction 0 8008 NULL
104230 +tt3650_ci_msg_locked_8013 tt3650_ci_msg_locked 4 8013 NULL
104231 +vcs_read_8017 vcs_read 3 8017 NULL
104232 +vhost_add_used_and_signal_n_8038 vhost_add_used_and_signal_n 4 8038 NULL
104233 +ms_read_multiple_pages_8052 ms_read_multiple_pages 5-4 8052 NULL
104234 +dgrp_mon_read_8065 dgrp_mon_read 3 8065 NULL
104235 +leb_read_lock_8070 leb_read_lock 0 8070 NULL
104236 +spi_write_then_read_8073 spi_write_then_read 5-3 8073 NULL
104237 +alloc_targets_8074 alloc_targets 2 8074 NULL nohasharray
104238 +qla4xxx_post_ping_evt_work_8074 qla4xxx_post_ping_evt_work 4 8074 &alloc_targets_8074
104239 +ext4_ext_map_blocks_8078 ext4_ext_map_blocks 0 8078 NULL
104240 +venus_lookup_8121 venus_lookup 4 8121 NULL
104241 +ieee80211_if_fmt_num_buffered_multicast_8127 ieee80211_if_fmt_num_buffered_multicast 3 8127 NULL
104242 +xfs_iomap_prealloc_size_8141 xfs_iomap_prealloc_size 3 8141 NULL
104243 +dma_map_area_8178 dma_map_area 3-2-5-0 8178 NULL
104244 +ore_truncate_8181 ore_truncate 3 8181 NULL
104245 +__sk_mem_schedule_8185 __sk_mem_schedule 2 8185 NULL
104246 +ieee80211_if_fmt_dot11MeshHoldingTimeout_8187 ieee80211_if_fmt_dot11MeshHoldingTimeout 3 8187 NULL
104247 +update_cowonly_root_8200 update_cowonly_root 0 8200 NULL
104248 +recent_mt_proc_write_8206 recent_mt_proc_write 3 8206 NULL
104249 +__ocfs2_lock_refcount_tree_8207 __ocfs2_lock_refcount_tree 0 8207 NULL
104250 +rt2x00debug_write_bbp_8212 rt2x00debug_write_bbp 3 8212 NULL
104251 +ad7879_spi_multi_read_8218 ad7879_spi_multi_read 3 8218 NULL
104252 +play_iframe_8219 play_iframe 3 8219 NULL
104253 +create_log_8225 create_log 2 8225 NULL nohasharray
104254 +kvm_mmu_page_set_gfn_8225 kvm_mmu_page_set_gfn 2 8225 &create_log_8225
104255 +sctp_ssnmap_size_8228 sctp_ssnmap_size 0-1-2 8228 NULL
104256 +ceph_sync_write_8233 ceph_sync_write 4 8233 NULL
104257 +bnx2x_iov_get_max_queue_count_8235 bnx2x_iov_get_max_queue_count 0 8235 NULL
104258 +check_xattr_ref_inode_8244 check_xattr_ref_inode 0 8244 NULL
104259 +t3_init_l2t_8261 t3_init_l2t 1 8261 NULL
104260 +init_cdev_8274 init_cdev 1 8274 NULL
104261 +rproc_recovery_write_8281 rproc_recovery_write 3 8281 NULL
104262 +qib_decode_7220_err_8315 qib_decode_7220_err 3 8315 NULL
104263 +snd_pcm_update_state_8320 snd_pcm_update_state 0 8320 NULL
104264 +construct_key_and_link_8321 construct_key_and_link 4 8321 NULL
104265 +ipwireless_send_packet_8328 ipwireless_send_packet 4 8328 NULL
104266 +cfs_cpt_spread_node_8338 cfs_cpt_spread_node 0 8338 NULL
104267 +tracing_entries_read_8345 tracing_entries_read 3 8345 NULL
104268 +ieee80211_if_fmt_ht_opmode_8347 ieee80211_if_fmt_ht_opmode 3 8347 NULL
104269 +generic_write_sync_8358 generic_write_sync 0 8358 NULL
104270 +ping_getfrag_8360 ping_getfrag 4-3 8360 NULL
104271 +ath6kl_lrssi_roam_write_8362 ath6kl_lrssi_roam_write 3 8362 NULL
104272 +ocfs2_decrease_refcount_rec_8385 ocfs2_decrease_refcount_rec 0 8385 NULL
104273 +xdi_copy_from_user_8395 xdi_copy_from_user 4 8395 NULL
104274 +zd_rf_scnprint_id_8406 zd_rf_scnprint_id 0-3 8406 NULL
104275 +smk_write_change_rule_8411 smk_write_change_rule 3 8411 NULL nohasharray
104276 +uvc_v4l2_ioctl_8411 uvc_v4l2_ioctl 2 8411 &smk_write_change_rule_8411
104277 +pca953x_gpio_to_irq_8424 pca953x_gpio_to_irq 2 8424 NULL
104278 +irq_create_mapping_8437 irq_create_mapping 2-0 8437 NULL
104279 +generic_bin_search_8440 generic_bin_search 0 8440 NULL
104280 +ring_wait_for_space_8457 ring_wait_for_space 0 8457 NULL
104281 +dir_changed_8471 dir_changed 0 8471 NULL
104282 +afs_cell_lookup_8482 afs_cell_lookup 2 8482 NULL
104283 +fore200e_chunk_alloc_8501 fore200e_chunk_alloc 4-3 8501 NULL
104284 +batadv_tt_len_8502 batadv_tt_len 0-1 8502 NULL
104285 +dev_config_8506 dev_config 3 8506 NULL
104286 +ACL_to_cifs_posix_8509 ACL_to_cifs_posix 3 8509 NULL
104287 +opticon_process_data_packet_8524 opticon_process_data_packet 3 8524 NULL
104288 +alloc_pg_vec_8533 alloc_pg_vec 2 8533 NULL
104289 +ocfs2_read_virt_blocks_8538 ocfs2_read_virt_blocks 2-3 8538 NULL
104290 +user_on_off_8552 user_on_off 2 8552 NULL
104291 +profile_remove_8556 profile_remove 3 8556 NULL
104292 +remove_extent_backref_8559 remove_extent_backref 0 8559 NULL
104293 +cache_slow_downcall_8570 cache_slow_downcall 2 8570 NULL
104294 +isr_dma0_done_read_8574 isr_dma0_done_read 3 8574 NULL
104295 +tower_write_8580 tower_write 3 8580 NULL
104296 +ocfs2_reserve_local_alloc_bits_8581 ocfs2_reserve_local_alloc_bits 0 8581 NULL
104297 +cfs_cpt_number_8618 cfs_cpt_number 0 8618 NULL
104298 +shash_setkey_unaligned_8620 shash_setkey_unaligned 3 8620 NULL
104299 +iommu_unmap_8624 iommu_unmap 2-0 8624 NULL
104300 +it821x_firmware_command_8628 it821x_firmware_command 3 8628 NULL
104301 +scsi_dma_map_8632 scsi_dma_map 0 8632 NULL
104302 +fuse_send_write_pages_8636 fuse_send_write_pages 0-5 8636 NULL
104303 +generic_acl_set_8658 generic_acl_set 4 8658 NULL
104304 +mlx5_vzalloc_8663 mlx5_vzalloc 1 8663 NULL
104305 +dio_bio_alloc_8677 dio_bio_alloc 5 8677 NULL
104306 +lbs_bcnmiss_read_8678 lbs_bcnmiss_read 3 8678 NULL
104307 +tc3589x_gpio_irq_unmap_8680 tc3589x_gpio_irq_unmap 2 8680 NULL
104308 +rproc_trace_read_8686 rproc_trace_read 3 8686 NULL
104309 +skb_frag_size_8695 skb_frag_size 0 8695 NULL
104310 +arcfb_write_8702 arcfb_write 3 8702 NULL
104311 +i_size_read_8703 i_size_read 0 8703 NULL nohasharray
104312 +init_header_8703 init_header 0 8703 &i_size_read_8703
104313 +HDLC_irq_8709 HDLC_irq 2 8709 NULL
104314 +ctrl_out_8712 ctrl_out 5-3 8712 NULL
104315 +__create_free_space_inode_8724 __create_free_space_inode 0 8724 NULL
104316 +tracing_max_lat_write_8728 tracing_max_lat_write 3 8728 NULL
104317 +jffs2_acl_count_8729 jffs2_acl_count 0-1 8729 NULL
104318 +f_dupfd_8730 f_dupfd 1 8730 NULL
104319 +__create_irqs_8733 __create_irqs 2-1-0-3 8733 NULL
104320 +pca953x_gpio_irq_map_8737 pca953x_gpio_irq_map 2 8737 NULL
104321 +tx_tx_exch_expiry_read_8749 tx_tx_exch_expiry_read 3 8749 NULL
104322 +compound_order_8750 compound_order 0 8750 NULL
104323 +ocfs2_find_path_8754 ocfs2_find_path 0 8754 NULL
104324 +yurex_write_8761 yurex_write 3 8761 NULL
104325 +joydev_compat_ioctl_8765 joydev_compat_ioctl 2 8765 NULL
104326 +x32_arch_ptrace_8767 x32_arch_ptrace 3 8767 NULL
104327 +kstrtoint_from_user_8778 kstrtoint_from_user 2 8778 NULL
104328 +paging32_prefetch_gpte_8783 paging32_prefetch_gpte 4 8783 NULL
104329 +ext4_try_to_write_inline_data_8785 ext4_try_to_write_inline_data 3-4 8785 NULL
104330 +aligned_nrpages_8791 aligned_nrpages 0-1-2 8791 NULL
104331 +__bitmap_weight_8796 __bitmap_weight 0-2 8796 NULL
104332 +cpuset_common_file_read_8800 cpuset_common_file_read 5 8800 NULL
104333 +intel_ring_begin_8808 intel_ring_begin 0 8808 NULL
104334 +ntfs_commit_pages_after_write_8809 ntfs_commit_pages_after_write 4-3 8809 NULL
104335 +metronomefb_write_8823 metronomefb_write 3 8823 NULL
104336 +SyS_llistxattr_8824 SyS_llistxattr 3 8824 NULL
104337 +get_queue_depth_8833 get_queue_depth 0 8833 NULL
104338 +dvb_ringbuffer_pkt_next_8834 dvb_ringbuffer_pkt_next 0-2 8834 NULL
104339 +usb_ep_queue_8839 usb_ep_queue 0 8839 NULL
104340 +clear_bitset_8840 clear_bitset 2 8840 NULL
104341 +debug_debug1_read_8856 debug_debug1_read 3 8856 NULL
104342 +wa_nep_queue_8858 wa_nep_queue 2 8858 NULL
104343 +sk_memory_allocated_8882 sk_memory_allocated 0 8882 NULL
104344 +compressed_bio_size_8887 compressed_bio_size 0-2 8887 NULL
104345 +ab3100_get_set_reg_8890 ab3100_get_set_reg 3 8890 NULL nohasharray
104346 +tracing_max_lat_read_8890 tracing_max_lat_read 3 8890 &ab3100_get_set_reg_8890
104347 +xfs_contig_bits_8904 xfs_contig_bits 3-0 8904 NULL
104348 +sdio_max_byte_size_8907 sdio_max_byte_size 0 8907 NULL
104349 +sysfs_merge_group_8917 sysfs_merge_group 0 8917 NULL
104350 +write_file_ani_8918 write_file_ani 3 8918 NULL
104351 +layout_commit_8926 layout_commit 3 8926 NULL
104352 +adjust_priv_size_8935 adjust_priv_size 0-1 8935 NULL
104353 +driver_stats_read_8944 driver_stats_read 3 8944 NULL
104354 +lookup_zone_8956 lookup_zone 2 8956 NULL
104355 +read_file_tgt_stats_8959 read_file_tgt_stats 3 8959 NULL
104356 +seq_bitmap_list_8963 seq_bitmap_list 3 8963 NULL
104357 +usb_allocate_stream_buffers_8964 usb_allocate_stream_buffers 3 8964 NULL
104358 +qib_qsfp_dump_8966 qib_qsfp_dump 0-3 8966 NULL
104359 +venus_mkdir_8967 venus_mkdir 4 8967 NULL
104360 +seq_open_net_8968 seq_open_net 4 8968 NULL nohasharray
104361 +vol_cdev_read_8968 vol_cdev_read 3 8968 &seq_open_net_8968
104362 +bio_integrity_get_tag_8974 bio_integrity_get_tag 3 8974 NULL
104363 +btrfs_alloc_free_block_8986 btrfs_alloc_free_block 3-8 8986 NULL
104364 +jbd2_journal_blocks_per_page_9004 jbd2_journal_blocks_per_page 0 9004 NULL
104365 +il_dbgfs_clear_ucode_stats_write_9016 il_dbgfs_clear_ucode_stats_write 3 9016 NULL
104366 +sparse_index_alloc_9021 sparse_index_alloc 1 9021 NULL
104367 +snd_emu10k1_ptr_read_9026 snd_emu10k1_ptr_read 0-2 9026 NULL
104368 +fd_ioctl_9028 fd_ioctl 3 9028 NULL
104369 +nla_put_9042 nla_put 3 9042 NULL
104370 +offset_il_node_9059 offset_il_node 0 9059 NULL
104371 +snd_emu10k1_synth_copy_from_user_9061 snd_emu10k1_synth_copy_from_user 5-3 9061 NULL
104372 +snd_gus_dram_peek_9062 snd_gus_dram_peek 4 9062 NULL
104373 +fib_info_hash_alloc_9075 fib_info_hash_alloc 1 9075 NULL
104374 +create_queues_9088 create_queues 3-2 9088 NULL
104375 +ftdi_prepare_write_buffer_9093 ftdi_prepare_write_buffer 3 9093 NULL
104376 +adxl34x_spi_read_block_9108 adxl34x_spi_read_block 3 9108 NULL
104377 +alloc_slabmgmt_9109 alloc_slabmgmt 5 9109 NULL
104378 +caif_stream_sendmsg_9110 caif_stream_sendmsg 4 9110 NULL nohasharray
104379 +gfn_to_rmap_9110 gfn_to_rmap 3-2 9110 &caif_stream_sendmsg_9110
104380 +udf_direct_IO_9111 udf_direct_IO 4 9111 NULL
104381 +pmcraid_change_queue_depth_9116 pmcraid_change_queue_depth 2 9116 NULL
104382 +mq_remove_mapping_9124 mq_remove_mapping 2 9124 NULL
104383 +mlx4_alloc_resize_umem_9132 mlx4_alloc_resize_umem 3 9132 NULL
104384 +ext4_list_backups_9138 ext4_list_backups 0 9138 NULL
104385 +apei_resources_merge_9149 apei_resources_merge 0 9149 NULL
104386 +vb2_dma_sg_alloc_9157 vb2_dma_sg_alloc 2 9157 NULL
104387 +dbg_command_buf_9165 dbg_command_buf 2 9165 NULL
104388 +__btrfs_add_delayed_deletion_item_9177 __btrfs_add_delayed_deletion_item 0 9177 NULL
104389 +isr_irqs_read_9181 isr_irqs_read 3 9181 NULL
104390 +count_leading_zeros_9183 count_leading_zeros 0 9183 NULL
104391 +altera_swap_ir_9194 altera_swap_ir 2 9194 NULL
104392 +snd_m3_get_pointer_9206 snd_m3_get_pointer 0 9206 NULL
104393 +get_pfn_9207 get_pfn 1 9207 NULL
104394 +virtqueue_add_9217 virtqueue_add 4-5 9217 NULL
104395 +tx_tx_prepared_descs_read_9221 tx_tx_prepared_descs_read 3 9221 NULL
104396 +sctp_getsockopt_delayed_ack_9232 sctp_getsockopt_delayed_ack 2 9232 NULL
104397 +ext4_mark_iloc_dirty_9239 ext4_mark_iloc_dirty 0 9239 NULL
104398 +ocfs2_clear_ext_refcount_9256 ocfs2_clear_ext_refcount 4-0 9256 NULL
104399 +hfsplus_bnode_read_u16_9262 hfsplus_bnode_read_u16 0 9262 NULL
104400 +btrfs_search_slot_9264 btrfs_search_slot 0 9264 NULL
104401 +ocfs2_merge_rec_right_9267 ocfs2_merge_rec_right 0 9267 NULL
104402 +sparse_early_usemaps_alloc_node_9269 sparse_early_usemaps_alloc_node 4 9269 NULL
104403 +hdpvr_read_9273 hdpvr_read 3 9273 NULL
104404 +flakey_status_9274 flakey_status 5 9274 NULL
104405 +iwl_dbgfs_stations_read_9309 iwl_dbgfs_stations_read 3 9309 NULL
104406 +ceph_sync_setxattr_9310 ceph_sync_setxattr 4 9310 NULL
104407 +ieee80211_if_fmt_txpower_9334 ieee80211_if_fmt_txpower 3 9334 NULL
104408 +nvme_trans_fmt_get_parm_header_9340 nvme_trans_fmt_get_parm_header 2 9340 NULL
104409 +ocfs2_orphan_for_truncate_9342 ocfs2_orphan_for_truncate 4 9342 NULL
104410 +__ksm_enter_9347 __ksm_enter 0 9347 NULL
104411 +ll_direct_rw_pages_9361 ll_direct_rw_pages 0 9361 NULL
104412 +of_node_to_nid_9367 of_node_to_nid 0 9367 NULL
104413 +sta_beacon_loss_count_read_9370 sta_beacon_loss_count_read 3 9370 NULL
104414 +get_request_type_9393 get_request_type 0 9393 NULL nohasharray
104415 +mlx4_bitmap_init_9393 mlx4_bitmap_init 5-2 9393 &get_request_type_9393
104416 +virtqueue_add_outbuf_9395 virtqueue_add_outbuf 3 9395 NULL
104417 +read_9397 read 3 9397 NULL
104418 +btrfs_drop_extents_9423 btrfs_drop_extents 4-0 9423 NULL
104419 +bm_realloc_pages_9431 bm_realloc_pages 2 9431 NULL
104420 +ffs_ep0_write_9438 ffs_ep0_write 3 9438 NULL
104421 +ieee80211_if_fmt_fwded_unicast_9454 ieee80211_if_fmt_fwded_unicast 3 9454 NULL
104422 +ext3_xattr_set_acl_9467 ext3_xattr_set_acl 4 9467 NULL
104423 +agp_generic_alloc_user_9470 agp_generic_alloc_user 1 9470 NULL nohasharray
104424 +get_registers_9470 get_registers 4 9470 &agp_generic_alloc_user_9470
104425 +btrfs_csum_file_blocks_9491 btrfs_csum_file_blocks 0 9491 NULL
104426 +crypt_status_9492 crypt_status 5 9492 NULL
104427 +lbs_threshold_write_9502 lbs_threshold_write 5 9502 NULL
104428 +btrfs_add_delayed_data_ref_9508 btrfs_add_delayed_data_ref 0 9508 NULL
104429 +lp_write_9511 lp_write 3 9511 NULL
104430 +mext_calc_swap_extents_9517 mext_calc_swap_extents 4 9517 NULL
104431 +scsi_tgt_kspace_exec_9522 scsi_tgt_kspace_exec 8 9522 NULL
104432 +ll_max_read_ahead_whole_mb_seq_write_9528 ll_max_read_ahead_whole_mb_seq_write 3 9528 NULL
104433 +read_file_dma_9530 read_file_dma 3 9530 NULL
104434 +ext3_alloc_branch_9534 ext3_alloc_branch 5 9534 NULL
104435 +iwl_dbgfs_bf_params_read_9542 iwl_dbgfs_bf_params_read 3 9542 NULL
104436 +unmerge_ksm_pages_9546 unmerge_ksm_pages 0 9546 NULL nohasharray
104437 +il_dbgfs_missed_beacon_write_9546 il_dbgfs_missed_beacon_write 3 9546 &unmerge_ksm_pages_9546
104438 +compat_SyS_pwritev64_9548 compat_SyS_pwritev64 3 9548 NULL
104439 +readl_9557 readl 0 9557 NULL
104440 +fw_node_create_9559 fw_node_create 2 9559 NULL
104441 +ipath_get_user_pages_9561 ipath_get_user_pages 1-2 9561 NULL
104442 +kobj_map_9566 kobj_map 3-2 9566 NULL
104443 +f2fs_read_data_pages_9574 f2fs_read_data_pages 4 9574 NULL
104444 +use_block_rsv_9597 use_block_rsv 3 9597 NULL
104445 +snd_emu10k1_fx8010_read_9605 snd_emu10k1_fx8010_read 5-6 9605 NULL
104446 +ocfs2_claim_suballoc_bits_9615 ocfs2_claim_suballoc_bits 0 9615 NULL
104447 +lov_ost_pool_add_9626 lov_ost_pool_add 3 9626 NULL
104448 +saa7164_buffer_alloc_user_9627 saa7164_buffer_alloc_user 2 9627 NULL
104449 +ceph_copy_user_to_page_vector_9635 ceph_copy_user_to_page_vector 3-4 9635 NULL
104450 +acpi_ex_insert_into_field_9638 acpi_ex_insert_into_field 3 9638 NULL
104451 +compat_sys_keyctl_9639 compat_sys_keyctl 4 9639 NULL
104452 +ll_checksum_seq_write_9648 ll_checksum_seq_write 3 9648 NULL
104453 +ocfs2_xattr_get_rec_9652 ocfs2_xattr_get_rec 0 9652 NULL
104454 +queue_received_packet_9657 queue_received_packet 5 9657 NULL
104455 +snd_opl4_mem_proc_write_9670 snd_opl4_mem_proc_write 5 9670 NULL
104456 +dns_query_9676 dns_query 3 9676 NULL
104457 +qib_7322_handle_hwerrors_9678 qib_7322_handle_hwerrors 3 9678 NULL
104458 +__erst_read_from_storage_9690 __erst_read_from_storage 0 9690 NULL
104459 +vx_transfer_end_9701 vx_transfer_end 0 9701 NULL
104460 +fnb_9703 fnb 2-3-0 9703 NULL
104461 +fuse_iter_npages_9705 fuse_iter_npages 0 9705 NULL nohasharray
104462 +ieee80211_if_read_aid_9705 ieee80211_if_read_aid 3 9705 &fuse_iter_npages_9705
104463 +cfg80211_tx_mlme_mgmt_9715 cfg80211_tx_mlme_mgmt 3 9715 NULL
104464 +btrfs_stack_file_extent_num_bytes_9720 btrfs_stack_file_extent_num_bytes 0 9720 NULL
104465 +SYSC_ppoll_9721 SYSC_ppoll 2 9721 NULL
104466 +nla_get_u8_9736 nla_get_u8 0 9736 NULL
104467 +ieee80211_if_fmt_num_mcast_sta_9738 ieee80211_if_fmt_num_mcast_sta 3 9738 NULL
104468 +shmem_replace_page_9740 shmem_replace_page 0 9740 NULL
104469 +ddb_input_read_9743 ddb_input_read 3-0 9743 NULL
104470 +sta_last_ack_signal_read_9751 sta_last_ack_signal_read 3 9751 NULL
104471 +ni_calculate_power_boost_limit_9757 ni_calculate_power_boost_limit 3 9757 NULL
104472 +btrfs_super_root_9763 btrfs_super_root 0 9763 NULL
104473 +__alloc_percpu_9764 __alloc_percpu 2-1 9764 NULL
104474 +btrfs_write_and_wait_transaction_9768 btrfs_write_and_wait_transaction 0 9768 NULL
104475 +__blk_queue_init_tags_9778 __blk_queue_init_tags 2 9778 NULL
104476 +snd_mem_proc_write_9786 snd_mem_proc_write 3 9786 NULL
104477 +kvm_age_hva_9795 kvm_age_hva 2 9795 NULL
104478 +parse_uac2_sample_rate_range_9801 parse_uac2_sample_rate_range 0 9801 NULL
104479 +tpm_data_in_9802 tpm_data_in 0 9802 NULL
104480 +ttm_bo_fbdev_io_9805 ttm_bo_fbdev_io 4 9805 NULL
104481 +udpv6_recvmsg_9813 udpv6_recvmsg 4 9813 NULL nohasharray
104482 +ieee80211_if_read_state_9813 ieee80211_if_read_state 3 9813 &udpv6_recvmsg_9813
104483 +svc_pool_map_get_node_9817 svc_pool_map_get_node 0 9817 NULL
104484 +dvb_dvr_set_buffer_size_9840 dvb_dvr_set_buffer_size 2 9840 NULL
104485 +pmcraid_alloc_sglist_9864 pmcraid_alloc_sglist 1 9864 NULL
104486 +btrfs_free_reserved_extent_9867 btrfs_free_reserved_extent 2 9867 NULL
104487 +f1x_translate_sysaddr_to_cs_9868 f1x_translate_sysaddr_to_cs 2 9868 NULL
104488 +mlx4_bitmap_alloc_range_9876 mlx4_bitmap_alloc_range 2-3 9876 NULL
104489 +wil_read_file_ioblob_9878 wil_read_file_ioblob 3 9878 NULL
104490 +bm_register_write_9893 bm_register_write 3 9893 NULL nohasharray
104491 +snd_midi_event_new_9893 snd_midi_event_new 1 9893 &bm_register_write_9893
104492 +snd_gf1_pcm_playback_copy_9895 snd_gf1_pcm_playback_copy 5-3 9895 NULL
104493 +nonpaging_page_fault_9908 nonpaging_page_fault 2 9908 NULL
104494 +ext4_map_blocks_9916 ext4_map_blocks 0 9916 NULL
104495 +root_nfs_parse_options_9937 root_nfs_parse_options 3 9937 NULL
104496 +tc3589x_reg_read_9940 tc3589x_reg_read 0 9940 NULL
104497 +pstore_ftrace_knob_read_9947 pstore_ftrace_knob_read 3 9947 NULL
104498 +read_file_misc_9948 read_file_misc 3 9948 NULL
104499 +ext2_new_blocks_9954 ext2_new_blocks 2-0 9954 NULL
104500 +csum_partial_copy_fromiovecend_9957 csum_partial_copy_fromiovecend 4-3 9957 NULL
104501 +SyS_gethostname_9964 SyS_gethostname 2 9964 NULL
104502 +get_free_serial_index_9969 get_free_serial_index 0 9969 NULL
104503 +btrfs_add_link_9973 btrfs_add_link 5-0 9973 NULL
104504 +SYSC_move_pages_9986 SYSC_move_pages 2 9986 NULL
104505 +aat2870_dump_reg_10019 aat2870_dump_reg 0 10019 NULL
104506 +batadv_orig_hash_add_if_10033 batadv_orig_hash_add_if 2 10033 NULL
104507 +iterate_inodes_from_logical_10037 iterate_inodes_from_logical 0 10037 NULL
104508 +vm_iomap_memory_10038 vm_iomap_memory 2 10038 NULL
104509 +ieee80211_set_probe_resp_10077 ieee80211_set_probe_resp 3 10077 NULL
104510 +ufs_bitmap_search_10105 ufs_bitmap_search 0-3 10105 NULL
104511 +get_elem_size_10110 get_elem_size 0-2 10110 NULL nohasharray
104512 +dynamic_ps_timeout_read_10110 dynamic_ps_timeout_read 3 10110 &get_elem_size_10110
104513 +gfs2_meta_read_10112 gfs2_meta_read 0 10112 NULL
104514 +SyS_migrate_pages_10134 SyS_migrate_pages 2 10134 NULL nohasharray
104515 +offset_to_bit_10134 offset_to_bit 0-2-1-3 10134 &SyS_migrate_pages_10134
104516 +aes_decrypt_packets_read_10155 aes_decrypt_packets_read 3 10155 NULL
104517 +rx_out_of_mem_read_10157 rx_out_of_mem_read 3 10157 NULL
104518 +hidg_alloc_ep_req_10159 hidg_alloc_ep_req 2 10159 NULL nohasharray
104519 +ol_chunk_entries_10159 ol_chunk_entries 0 10159 &hidg_alloc_ep_req_10159
104520 +stmpe_irq_unmap_10164 stmpe_irq_unmap 2 10164 NULL
104521 +asd_store_update_bios_10165 asd_store_update_bios 4 10165 NULL
104522 +kstrtol_from_user_10168 kstrtol_from_user 2 10168 NULL
104523 +proc_pid_attr_read_10173 proc_pid_attr_read 3 10173 NULL
104524 +jffs2_user_setxattr_10182 jffs2_user_setxattr 4 10182 NULL
104525 +register_ftrace_function_10218 register_ftrace_function 0 10218 NULL
104526 +hdlc_rpr_irq_10240 hdlc_rpr_irq 2 10240 NULL
104527 +mutex_lock_killable_10244 mutex_lock_killable 0 10244 NULL
104528 +insert_orphan_item_10249 insert_orphan_item 0 10249 NULL
104529 +cciss_proc_write_10259 cciss_proc_write 3 10259 NULL
104530 +gpiochip_add_to_list_10262 gpiochip_add_to_list 0 10262 NULL
104531 +__qlcnic_pci_sriov_enable_10281 __qlcnic_pci_sriov_enable 2 10281 NULL
104532 +snd_rme9652_capture_copy_10287 snd_rme9652_capture_copy 5 10287 NULL
104533 +ubi_leb_change_10289 ubi_leb_change 0-4 10289 NULL
104534 +read_emulate_10310 read_emulate 2-4 10310 NULL
104535 +read_file_spectral_count_10320 read_file_spectral_count 3 10320 NULL
104536 +ttm_object_device_init_10321 ttm_object_device_init 2 10321 NULL
104537 +compat_SyS_writev_10327 compat_SyS_writev 3 10327 NULL
104538 +ubi_leb_read_10328 ubi_leb_read 0 10328 NULL
104539 +tun_sendmsg_10337 tun_sendmsg 4 10337 NULL
104540 +get_dump_page_10338 get_dump_page 1 10338 NULL
104541 +ufx_alloc_urb_list_10349 ufx_alloc_urb_list 3 10349 NULL
104542 +whci_add_cap_10350 whci_add_cap 0 10350 NULL
104543 +dbAllocAny_10354 dbAllocAny 0 10354 NULL
104544 +ath6kl_listen_int_read_10355 ath6kl_listen_int_read 3 10355 NULL
104545 +__ntfs_cluster_free_10360 __ntfs_cluster_free 0 10360 NULL
104546 +ms_write_multiple_pages_10362 ms_write_multiple_pages 6-5 10362 NULL
104547 +sta_ht_capa_read_10366 sta_ht_capa_read 3 10366 NULL
104548 +ecryptfs_decode_and_decrypt_filename_10379 ecryptfs_decode_and_decrypt_filename 5 10379 NULL
104549 +do_compat_pselect_10398 do_compat_pselect 1 10398 NULL
104550 +fwtty_rx_10434 fwtty_rx 3 10434 NULL
104551 +event_phy_transmit_error_read_10471 event_phy_transmit_error_read 3 10471 NULL
104552 +ext4_itable_unused_count_10501 ext4_itable_unused_count 0 10501 NULL
104553 +qib_alloc_fast_reg_page_list_10507 qib_alloc_fast_reg_page_list 2 10507 NULL
104554 +sel_write_disable_10511 sel_write_disable 3 10511 NULL
104555 +osd_req_write_sg_kern_10514 osd_req_write_sg_kern 5 10514 NULL
104556 +rds_message_alloc_10517 rds_message_alloc 1 10517 NULL
104557 +qlcnic_pci_sriov_enable_10519 qlcnic_pci_sriov_enable 2 10519 NULL nohasharray
104558 +hash_netiface4_expire_10519 hash_netiface4_expire 3 10519 &qlcnic_pci_sriov_enable_10519
104559 +snd_pcm_hw_params_user_10520 snd_pcm_hw_params_user 0 10520 NULL
104560 +ocfs2_add_refcounted_extent_10526 ocfs2_add_refcounted_extent 6 10526 NULL
104561 +snd_pcm_lib_read_10536 snd_pcm_lib_read 0-3 10536 NULL nohasharray
104562 +kstrtouint_from_user_10536 kstrtouint_from_user 2 10536 &snd_pcm_lib_read_10536
104563 +SYSC_read_10545 SYSC_read 3 10545 NULL
104564 +ext4_write_begin_10576 ext4_write_begin 3-4 10576 NULL
104565 +scrub_remap_extent_10588 scrub_remap_extent 2 10588 NULL
104566 +otp_read_10594 otp_read 5-4-2 10594 NULL
104567 +supply_map_read_file_10608 supply_map_read_file 3 10608 NULL
104568 +ima_show_htable_violations_10619 ima_show_htable_violations 3 10619 NULL
104569 +alloc_coherent_10632 alloc_coherent 2 10632 NULL
104570 +ubi_io_write_vid_hdr_10660 ubi_io_write_vid_hdr 0 10660 NULL nohasharray
104571 +nfs_idmap_lookup_id_10660 nfs_idmap_lookup_id 2 10660 &ubi_io_write_vid_hdr_10660
104572 +efx_max_tx_len_10662 efx_max_tx_len 0-2 10662 NULL
104573 +dtf_read_device_10663 dtf_read_device 3 10663 NULL
104574 +parport_write_10669 parport_write 0 10669 NULL
104575 +alloc_vmap_area_10682 alloc_vmap_area 5 10682 NULL
104576 +edge_write_10692 edge_write 4 10692 NULL
104577 +ubi_wl_get_peb_10699 ubi_wl_get_peb 0 10699 NULL
104578 +inl_10708 inl 0 10708 NULL nohasharray
104579 +selinux_inode_setxattr_10708 selinux_inode_setxattr 4 10708 &inl_10708
104580 +pvr2_ioread_read_10720 pvr2_ioread_read 3 10720 NULL nohasharray
104581 +shash_async_setkey_10720 shash_async_setkey 3 10720 &pvr2_ioread_read_10720
104582 +spi_sync_10731 spi_sync 0 10731 NULL
104583 +sctp_getsockopt_maxseg_10737 sctp_getsockopt_maxseg 2 10737 NULL nohasharray
104584 +apu_get_register_10737 apu_get_register 0 10737 &sctp_getsockopt_maxseg_10737
104585 +alloc_one_pg_vec_page_10747 alloc_one_pg_vec_page 1 10747 NULL
104586 +ttm_ref_object_add_10748 ttm_ref_object_add 0 10748 NULL
104587 +SyS_io_getevents_10756 SyS_io_getevents 3 10756 NULL
104588 +vhost_add_used_n_10760 vhost_add_used_n 3 10760 NULL
104589 +kvm_read_guest_atomic_10765 kvm_read_guest_atomic 2-4 10765 NULL
104590 +wb_set_dirty_10778 wb_set_dirty 2 10778 NULL
104591 +__qp_memcpy_to_queue_10779 __qp_memcpy_to_queue 2-4 10779 NULL
104592 +i915_gem_wait_for_error_10791 i915_gem_wait_for_error 0 10791 NULL
104593 +snd_mask_value_10794 snd_mask_value 0 10794 NULL
104594 +diva_set_trace_filter_10820 diva_set_trace_filter 0-1 10820 NULL
104595 +lbs_sleepparams_read_10840 lbs_sleepparams_read 3 10840 NULL
104596 +ida_get_new_above_10853 ida_get_new_above 2-0 10853 NULL
104597 +fuse_conn_max_background_read_10855 fuse_conn_max_background_read 3 10855 NULL
104598 +ol_chunk_blocks_10864 ol_chunk_blocks 0 10864 NULL
104599 +snd_pcm_oss_write1_10872 snd_pcm_oss_write1 3 10872 NULL
104600 +drm_ht_insert_item_10877 drm_ht_insert_item 0 10877 NULL
104601 +wiidebug_drm_write_10879 wiidebug_drm_write 3 10879 NULL
104602 +get_scq_10897 get_scq 2 10897 NULL
104603 +cgroup_write_string_10900 cgroup_write_string 5 10900 NULL
104604 +tifm_alloc_adapter_10903 tifm_alloc_adapter 1 10903 NULL
104605 +lprocfs_wr_atomic_10912 lprocfs_wr_atomic 3 10912 NULL
104606 +__copy_from_user_10918 __copy_from_user 3-0 10918 NULL
104607 +kobject_add_10919 kobject_add 0 10919 NULL
104608 +da9052_map_irq_10952 da9052_map_irq 2 10952 NULL
104609 +ci_port_test_write_10962 ci_port_test_write 3 10962 NULL
104610 +bm_entry_read_10976 bm_entry_read 3 10976 NULL
104611 +sched_autogroup_write_10984 sched_autogroup_write 3 10984 NULL
104612 +__hci_num_ctrl_10985 __hci_num_ctrl 0 10985 NULL
104613 +lz4_compress_10986 lz4_compress 2 10986 NULL
104614 +do_switch_10994 do_switch 0 10994 NULL
104615 +xfrm_hash_alloc_10997 xfrm_hash_alloc 1 10997 NULL
104616 +btrfs_add_delayed_extent_op_11000 btrfs_add_delayed_extent_op 0 11000 NULL
104617 +rx_filter_accum_arp_pend_requests_read_11003 rx_filter_accum_arp_pend_requests_read 3 11003 NULL
104618 +SetLineNumber_11023 SetLineNumber 0 11023 NULL
104619 +btrfs_find_space_for_alloc_11028 btrfs_find_space_for_alloc 2 11028 NULL
104620 +mb_find_next_bit_11037 mb_find_next_bit 0-2-3 11037 NULL
104621 +tda10048_writeregbulk_11050 tda10048_writeregbulk 4 11050 NULL
104622 +insert_inline_extent_backref_11063 insert_inline_extent_backref 8-0 11063 NULL
104623 +tcp_send_mss_11079 tcp_send_mss 0 11079 NULL
104624 +snd_pcm_delay_11081 snd_pcm_delay 0 11081 NULL
104625 +count_argc_11083 count_argc 0 11083 NULL
104626 +kvm_write_guest_cached_11106 kvm_write_guest_cached 4 11106 NULL
104627 +stmpe_gpio_to_irq_11110 stmpe_gpio_to_irq 2 11110 NULL
104628 +tw_change_queue_depth_11116 tw_change_queue_depth 2 11116 NULL
104629 +page_offset_11120 page_offset 0 11120 NULL
104630 +tracing_buffers_read_11124 tracing_buffers_read 3 11124 NULL nohasharray
104631 +cea_db_payload_len_11124 cea_db_payload_len 0 11124 &tracing_buffers_read_11124
104632 +alloc_alien_cache_11127 alloc_alien_cache 2-1 11127 NULL
104633 +eb_lookup_objects_11163 eb_lookup_objects 0 11163 NULL
104634 +snd_gf1_pcm_playback_silence_11172 snd_gf1_pcm_playback_silence 4-3 11172 NULL
104635 +il_dbgfs_rx_queue_read_11221 il_dbgfs_rx_queue_read 3 11221 NULL
104636 +comedi_alloc_spriv_11234 comedi_alloc_spriv 2 11234 NULL
104637 +mmap_region_11247 mmap_region 0-2 11247 NULL
104638 +ubifs_write_node_11258 ubifs_write_node 5-3 11258 NULL
104639 +dm_cache_discard_bitset_resize_11262 dm_cache_discard_bitset_resize 3 11262 NULL
104640 +hugetlbfs_read_11268 hugetlbfs_read 3 11268 NULL
104641 +ath6kl_power_params_write_11274 ath6kl_power_params_write 3 11274 NULL
104642 +__proc_daemon_file_11305 __proc_daemon_file 5 11305 NULL
104643 +split_vma_11312 split_vma 0 11312 NULL
104644 +btrfs_update_inode_fallback_11313 btrfs_update_inode_fallback 0 11313 NULL
104645 +ext4_xattr_check_names_11314 ext4_xattr_check_names 0 11314 NULL
104646 +sk_filter_size_11316 sk_filter_size 0 11316 NULL nohasharray
104647 +tcp_send_rcvq_11316 tcp_send_rcvq 3 11316 &sk_filter_size_11316
104648 +shmem_radix_tree_replace_11325 shmem_radix_tree_replace 0 11325 NULL
104649 +construct_key_11329 construct_key 3 11329 NULL nohasharray
104650 +__kfifo_out_peek_11329 __kfifo_out_peek 0-3 11329 &construct_key_11329
104651 +next_segment_11330 next_segment 0-2-1 11330 NULL
104652 +persistent_ram_buffer_map_11332 persistent_ram_buffer_map 2-1 11332 NULL
104653 +ext4_get_inline_size_11349 ext4_get_inline_size 0 11349 NULL
104654 +nl80211_send_mgmt_11353 nl80211_send_mgmt 7 11353 NULL nohasharray
104655 +sel_write_create_11353 sel_write_create 3 11353 &nl80211_send_mgmt_11353
104656 +drm_vblank_init_11362 drm_vblank_init 2 11362 NULL
104657 +qib_get_base_info_11369 qib_get_base_info 3 11369 NULL
104658 +isku_sysfs_read_keys_capslock_11392 isku_sysfs_read_keys_capslock 6 11392 NULL
104659 +dev_irnet_write_11398 dev_irnet_write 3 11398 NULL
104660 +__btrfs_add_delayed_insertion_item_11400 __btrfs_add_delayed_insertion_item 0 11400 NULL
104661 +lprocfs_wr_evict_client_11402 lprocfs_wr_evict_client 3 11402 NULL
104662 +___alloc_bootmem_11410 ___alloc_bootmem 1 11410 NULL
104663 +str_to_user_11411 str_to_user 2 11411 NULL
104664 +mem_fw_gen_free_mem_blks_read_11413 mem_fw_gen_free_mem_blks_read 3 11413 NULL
104665 +ath6kl_wmi_test_rx_11414 ath6kl_wmi_test_rx 3 11414 NULL
104666 +relocate_entry_gtt_11416 relocate_entry_gtt 0 11416 NULL
104667 +adis16480_show_firmware_revision_11417 adis16480_show_firmware_revision 3 11417 NULL
104668 +trace_options_read_11419 trace_options_read 3 11419 NULL
104669 +i40e_dbg_command_write_11421 i40e_dbg_command_write 3 11421 NULL
104670 +__irq_set_trigger_11422 __irq_set_trigger 0 11422 NULL nohasharray
104671 +xd_read_multiple_pages_11422 xd_read_multiple_pages 5-4 11422 &__irq_set_trigger_11422
104672 +prepare_image_11424 prepare_image 0 11424 NULL
104673 +ext3_xattr_block_list_11428 ext3_xattr_block_list 3 11428 NULL
104674 +bttv_read_11432 bttv_read 3 11432 NULL
104675 +create_zero_mask_11453 create_zero_mask 0-1 11453 NULL
104676 +do_blockdev_direct_IO_11455 do_blockdev_direct_IO 0-6 11455 NULL
104677 +swp_offset_11475 swp_offset 0 11475 NULL
104678 +st_sensors_write_data_with_mask_11476 st_sensors_write_data_with_mask 3 11476 NULL
104679 +pci_set_power_state_11479 pci_set_power_state 0 11479 NULL nohasharray
104680 +sca3000_read_first_n_hw_rb_11479 sca3000_read_first_n_hw_rb 2 11479 &pci_set_power_state_11479
104681 +xfs_file_buffered_aio_write_11492 xfs_file_buffered_aio_write 4 11492 NULL
104682 +sd_do_mode_sense_11507 sd_do_mode_sense 5 11507 NULL
104683 +kmem_zalloc_11510 kmem_zalloc 1 11510 NULL
104684 +ll_direct_IO_26_seg_11518 ll_direct_IO_26_seg 0 11518 NULL
104685 +twl_direction_in_11527 twl_direction_in 2 11527 NULL
104686 +setup_IO_APIC_irq_extra_11537 setup_IO_APIC_irq_extra 1 11537 NULL
104687 +radix_tree_extend_11555 radix_tree_extend 0 11555 NULL
104688 +skb_cow_data_11565 skb_cow_data 0 11565 NULL
104689 +posix_acl_create_masq_11567 posix_acl_create_masq 0 11567 NULL
104690 +qgroup_account_ref_step2_11575 qgroup_account_ref_step2 0 11575 NULL
104691 +lpfc_idiag_ctlacc_write_11576 lpfc_idiag_ctlacc_write 3 11576 NULL
104692 +oprofilefs_ulong_to_user_11582 oprofilefs_ulong_to_user 3 11582 NULL
104693 +snd_pcm_action_11589 snd_pcm_action 0 11589 NULL
104694 +fw_device_op_ioctl_11595 fw_device_op_ioctl 2 11595 NULL
104695 +btrfs_uuid_tree_add_11598 btrfs_uuid_tree_add 0 11598 NULL
104696 +SYSC_mq_timedsend_11607 SYSC_mq_timedsend 3 11607 NULL
104697 +i915_gem_object_pin_11630 i915_gem_object_pin 0 11630 NULL
104698 +add_new_bitmap_11644 add_new_bitmap 3 11644 NULL
104699 +sisusb_send_bridge_packet_11649 sisusb_send_bridge_packet 2 11649 NULL
104700 +nla_total_size_11658 nla_total_size 0-1 11658 NULL
104701 +slab_ksize_11664 slab_ksize 0 11664 NULL
104702 +__btrfs_run_delayed_items_11671 __btrfs_run_delayed_items 0 11671 NULL
104703 +ide_queue_pc_tail_11673 ide_queue_pc_tail 5 11673 NULL
104704 +compat_SyS_msgsnd_11675 compat_SyS_msgsnd 3 11675 NULL
104705 +btrfs_alloc_delayed_item_11678 btrfs_alloc_delayed_item 1 11678 NULL
104706 +dsp_buffer_alloc_11684 dsp_buffer_alloc 2 11684 NULL
104707 +sctp_setsockopt_hmac_ident_11687 sctp_setsockopt_hmac_ident 3 11687 NULL
104708 +split_11691 split 2 11691 NULL
104709 +snd_ctl_elem_user_tlv_11695 snd_ctl_elem_user_tlv 3 11695 NULL
104710 +blk_rq_cur_bytes_11723 blk_rq_cur_bytes 0 11723 NULL
104711 +dm_bio_prison_create_11749 dm_bio_prison_create 1 11749 NULL
104712 +iwl_dbgfs_qos_read_11753 iwl_dbgfs_qos_read 3 11753 NULL
104713 +intel_map_page_11762 intel_map_page 4-3 11762 NULL
104714 +ocfs2_relink_block_group_11769 ocfs2_relink_block_group 0 11769 NULL
104715 +ps_pspoll_timeouts_read_11776 ps_pspoll_timeouts_read 3 11776 NULL
104716 +ebt_buf_add_11779 ebt_buf_add 0 11779 NULL
104717 +btrfs_key_blockptr_11786 btrfs_key_blockptr 0 11786 NULL
104718 +rtw_alloc_etherdev_11792 rtw_alloc_etherdev 1 11792 NULL
104719 +pcpu_fc_alloc_11818 pcpu_fc_alloc 2 11818 NULL
104720 +umc_device_register_11824 umc_device_register 0 11824 NULL
104721 +zerocopy_sg_from_iovec_11828 zerocopy_sg_from_iovec 3 11828 NULL
104722 +sctp_setsockopt_maxseg_11829 sctp_setsockopt_maxseg 3 11829 NULL
104723 +rts51x_read_status_11830 rts51x_read_status 4 11830 NULL
104724 +unix_stream_connect_11844 unix_stream_connect 3 11844 NULL
104725 +ecryptfs_copy_filename_11868 ecryptfs_copy_filename 4 11868 NULL
104726 +ieee80211_rx_bss_info_11887 ieee80211_rx_bss_info 3 11887 NULL
104727 +mdc_rename_11899 mdc_rename 4-6 11899 NULL
104728 +xstateregs_get_11906 xstateregs_get 4 11906 NULL
104729 +ti_write_11916 ti_write 4 11916 NULL
104730 +fs_devrw_entry_11924 fs_devrw_entry 3 11924 NULL
104731 +bitmap_remap_11929 bitmap_remap 5 11929 NULL
104732 +atomic_sub_return_11939 atomic_sub_return 0-1 11939 NULL
104733 +r1_sync_page_io_11963 r1_sync_page_io 3 11963 NULL
104734 +f1x_swap_interleaved_region_11970 f1x_swap_interleaved_region 0-2 11970 NULL
104735 +usc_InReg_11976 usc_InReg 0 11976 NULL nohasharray
104736 +split_node_11976 split_node 0 11976 &usc_InReg_11976
104737 +BeceemFlashBulkRead_11979 BeceemFlashBulkRead 0 11979 NULL
104738 +read_and_add_raw_conns_11987 read_and_add_raw_conns 0 11987 NULL
104739 +i40e_pci_sriov_configure_12011 i40e_pci_sriov_configure 2 12011 NULL
104740 +intel_init_ring_buffer_12016 intel_init_ring_buffer 0 12016 NULL
104741 +ftdi_elan_total_command_size_12045 ftdi_elan_total_command_size 0 12045 NULL
104742 +did_overwrite_ref_12046 did_overwrite_ref 0 12046 NULL
104743 +ieee80211_if_read_user_power_level_12050 ieee80211_if_read_user_power_level 3 12050 NULL
104744 +il4965_ucode_tx_stats_read_12064 il4965_ucode_tx_stats_read 3 12064 NULL
104745 +ptc_proc_write_12076 ptc_proc_write 3 12076 NULL
104746 +ubifs_recover_log_leb_12079 ubifs_recover_log_leb 3 12079 NULL
104747 +pse36_gfn_delta_12087 pse36_gfn_delta 0-1 12087 NULL
104748 +rtw_malloc2d_12102 rtw_malloc2d 1-2-3 12102 NULL
104749 +twl6030_irq_map_12105 twl6030_irq_map 2 12105 NULL
104750 +i915_gem_evict_something_12118 i915_gem_evict_something 0 12118 NULL
104751 +alloc_bulk_urbs_generic_12127 alloc_bulk_urbs_generic 5 12127 NULL
104752 +set_powered_12129 set_powered 4 12129 NULL
104753 +ramoops_init_prz_12134 ramoops_init_prz 5 12134 NULL
104754 +xfs_handle_to_dentry_12135 xfs_handle_to_dentry 3 12135 NULL
104755 +rawv6_seticmpfilter_12137 rawv6_seticmpfilter 5 12137 NULL
104756 +vmw_fifo_reserve_12141 vmw_fifo_reserve 2 12141 NULL
104757 +rawsock_recvmsg_12144 rawsock_recvmsg 4 12144 NULL
104758 +get_idx_gc_leb_12148 get_idx_gc_leb 0 12148 NULL
104759 +btmrvl_sdio_host_to_card_12152 btmrvl_sdio_host_to_card 3 12152 NULL
104760 +ocfs2_local_alloc_new_window_12153 ocfs2_local_alloc_new_window 0 12153 NULL
104761 +vmbus_open_12154 vmbus_open 2-3 12154 NULL
104762 +dma_memcpy_to_iovec_12173 dma_memcpy_to_iovec 5 12173 NULL
104763 +LNetEQAlloc_12178 LNetEQAlloc 1 12178 NULL
104764 +ddp_make_gl_12179 ddp_make_gl 1 12179 NULL
104765 +compat_do_arpt_set_ctl_12184 compat_do_arpt_set_ctl 4 12184 NULL
104766 +ip_generic_getfrag_12187 ip_generic_getfrag 3-4 12187 NULL
104767 +scaled_div_12201 scaled_div 2-1-0 12201 NULL
104768 +snd_pcm_kernel_ioctl_12219 snd_pcm_kernel_ioctl 0 12219 NULL
104769 +fuse_get_req_12221 fuse_get_req 2 12221 NULL nohasharray
104770 +aat2870_reg_read_file_12221 aat2870_reg_read_file 3 12221 &fuse_get_req_12221
104771 +__alloc_bootmem_low_nopanic_12235 __alloc_bootmem_low_nopanic 1 12235 NULL
104772 +kvirt_to_pa_12247 kvirt_to_pa 0 12247 NULL
104773 +ib_uverbs_unmarshall_recv_12251 ib_uverbs_unmarshall_recv 5 12251 NULL
104774 +shash_compat_setkey_12267 shash_compat_setkey 3 12267 NULL
104775 +add_sctp_bind_addr_12269 add_sctp_bind_addr 3 12269 NULL
104776 +note_last_dentry_12285 note_last_dentry 3 12285 NULL
104777 +il_dbgfs_nvm_read_12288 il_dbgfs_nvm_read 3 12288 NULL nohasharray
104778 +roundup_to_multiple_of_64_12288 roundup_to_multiple_of_64 0-1 12288 &il_dbgfs_nvm_read_12288
104779 +wrap_min_12303 wrap_min 0-1-2 12303 NULL
104780 +bt_sock_recvmsg_12316 bt_sock_recvmsg 4 12316 NULL
104781 +pcbit_writecmd_12332 pcbit_writecmd 2 12332 NULL
104782 +btrfs_lookup_file_extent_12341 btrfs_lookup_file_extent 0 12341 NULL
104783 +mptctl_ioctl_12355 mptctl_ioctl 2 12355 NULL
104784 +paging32_walk_addr_12359 paging32_walk_addr 3 12359 NULL
104785 +__nf_ct_ext_add_length_12364 __nf_ct_ext_add_length 3 12364 NULL
104786 +xfs_iext_inline_to_direct_12384 xfs_iext_inline_to_direct 2 12384 NULL
104787 +btrfs_file_extent_ram_bytes_12391 btrfs_file_extent_ram_bytes 0 12391 NULL nohasharray
104788 +populate_dir_12391 populate_dir 0 12391 &btrfs_file_extent_ram_bytes_12391
104789 +ntfs_get_size_for_mapping_pairs_12413 ntfs_get_size_for_mapping_pairs 0 12413 NULL nohasharray
104790 +ext4_bg_num_gdb_nometa_12413 ext4_bg_num_gdb_nometa 0 12413 &ntfs_get_size_for_mapping_pairs_12413
104791 +hbucket_elem_add_12416 hbucket_elem_add 3 12416 NULL
104792 +ieee80211_if_read_num_mcast_sta_12419 ieee80211_if_read_num_mcast_sta 3 12419 NULL
104793 +cfs_array_alloc_12441 cfs_array_alloc 2 12441 NULL
104794 +skb_do_copy_data_nocache_12465 skb_do_copy_data_nocache 5 12465 NULL
104795 +x25_sendmsg_12487 x25_sendmsg 4 12487 NULL
104796 +fnic_trace_ctrl_read_12497 fnic_trace_ctrl_read 3 12497 NULL
104797 +qib_alloc_fast_reg_mr_12526 qib_alloc_fast_reg_mr 2 12526 NULL
104798 +xfs_get_extsz_hint_12531 xfs_get_extsz_hint 0 12531 NULL
104799 +kvm_setup_async_pf_12555 kvm_setup_async_pf 3 12555 NULL
104800 +ib_umem_get_12557 ib_umem_get 2-3 12557 NULL
104801 +ceph_osdc_wait_request_12572 ceph_osdc_wait_request 0 12572 NULL
104802 +hvc_alloc_12579 hvc_alloc 4 12579 NULL
104803 +snd_pcm_plugin_alloc_12580 snd_pcm_plugin_alloc 2 12580 NULL
104804 +pcpu_extend_area_map_12589 pcpu_extend_area_map 2 12589 NULL
104805 +tlbflush_write_file_12598 tlbflush_write_file 3 12598 NULL
104806 +vhci_put_user_12604 vhci_put_user 4 12604 NULL
104807 +ipath_mmap_mem_12625 ipath_mmap_mem 3 12625 NULL
104808 +sdhci_pltfm_init_12627 sdhci_pltfm_init 3 12627 NULL
104809 +pwr_rcvd_awake_bcns_cnt_read_12632 pwr_rcvd_awake_bcns_cnt_read 3 12632 NULL
104810 +pn_sendmsg_12640 pn_sendmsg 4 12640 NULL
104811 +dwc3_link_state_write_12641 dwc3_link_state_write 3 12641 NULL
104812 +nr_recvmsg_12649 nr_recvmsg 4 12649 NULL
104813 +wb_create_12651 wb_create 1 12651 NULL
104814 +rtw_android_get_link_speed_12655 rtw_android_get_link_speed 0 12655 NULL
104815 +ocfs2_read_block_12659 ocfs2_read_block 0 12659 NULL
104816 +sel_read_class_12669 sel_read_class 3 12669 NULL nohasharray
104817 +sparse_mem_maps_populate_node_12669 sparse_mem_maps_populate_node 4 12669 &sel_read_class_12669
104818 +ext4_writepage_trans_blocks_12674 ext4_writepage_trans_blocks 0 12674 NULL
104819 +ext4_bg_num_gdb_meta_12702 ext4_bg_num_gdb_meta 0 12702 NULL
104820 +iwl_dbgfs_calib_disabled_write_12707 iwl_dbgfs_calib_disabled_write 3 12707 NULL
104821 +ieee80211_if_read_num_buffered_multicast_12716 ieee80211_if_read_num_buffered_multicast 3 12716 NULL
104822 +ivtv_write_12721 ivtv_write 3 12721 NULL
104823 +key_rx_spec_read_12736 key_rx_spec_read 3 12736 NULL
104824 +__mei_cl_async_send_12737 __mei_cl_async_send 3 12737 NULL
104825 +run_delayed_data_ref_12749 run_delayed_data_ref 0 12749 NULL
104826 +ieee80211_if_read_dot11MeshMaxRetries_12756 ieee80211_if_read_dot11MeshMaxRetries 3 12756 NULL
104827 +listxattr_12769 listxattr 3 12769 NULL
104828 +sctp_ssnmap_init_12772 sctp_ssnmap_init 2-3 12772 NULL
104829 +btrfs_remove_free_space_12793 btrfs_remove_free_space 2-0 12793 NULL
104830 +scsi_adjust_queue_depth_12802 scsi_adjust_queue_depth 3 12802 NULL
104831 +xfs_inumbers_fmt_12817 xfs_inumbers_fmt 3 12817 NULL
104832 +tlv_put_uuid_12824 tlv_put_uuid 0 12824 NULL
104833 +readq_12825 readq 0 12825 NULL
104834 +SyS_add_key_12834 SyS_add_key 4 12834 NULL
104835 +TSS_authhmac_12839 TSS_authhmac 3 12839 NULL
104836 +spidev_sync_12842 spidev_sync 0 12842 NULL
104837 +spidev_ioctl_12846 spidev_ioctl 2 12846 NULL
104838 +get_leb_cnt_12892 get_leb_cnt 0-2 12892 NULL
104839 +ocfs2_hamming_encode_block_12904 ocfs2_hamming_encode_block 2 12904 NULL
104840 +get_virtual_node_size_12908 get_virtual_node_size 0 12908 NULL
104841 +rds_pages_in_vec_12922 rds_pages_in_vec 0 12922 NULL
104842 +free_tind_blocks_12926 free_tind_blocks 0 12926 NULL
104843 +ci_ll_init_12930 ci_ll_init 3 12930 NULL
104844 +SYSC_sendfile_12936 SYSC_sendfile 4 12936 NULL
104845 +do_inode_permission_12946 do_inode_permission 0 12946 NULL
104846 +bm_status_write_12964 bm_status_write 3 12964 NULL
104847 +raid56_parity_recover_12987 raid56_parity_recover 5 12987 NULL
104848 +TransmitTcb_12989 TransmitTcb 4 12989 NULL
104849 +sk_peek_offset_12991 sk_peek_offset 0 12991 NULL
104850 +subsystem_filter_write_13022 subsystem_filter_write 3 13022 NULL
104851 +btrfs_write_dirty_block_groups_13030 btrfs_write_dirty_block_groups 0 13030 NULL
104852 +generic_segment_checks_13041 generic_segment_checks 0 13041 NULL
104853 +SyS_sendfile64_13043 SyS_sendfile64 4 13043 NULL
104854 +ocfs2_write_begin_13045 ocfs2_write_begin 3-4 13045 NULL
104855 +ptlrpc_lprocfs_threads_min_seq_write_13060 ptlrpc_lprocfs_threads_min_seq_write 3 13060 NULL nohasharray
104856 +__dn_setsockopt_13060 __dn_setsockopt 5 13060 &ptlrpc_lprocfs_threads_min_seq_write_13060
104857 +biovec_create_pool_13079 biovec_create_pool 2 13079 NULL
104858 +_ocfs2_free_suballoc_bits_13085 _ocfs2_free_suballoc_bits 0 13085 NULL
104859 +irq_set_chip_and_handler_13088 irq_set_chip_and_handler 1 13088 NULL
104860 +xattr_getsecurity_13090 xattr_getsecurity 0 13090 NULL
104861 +mb_find_next_zero_bit_13100 mb_find_next_zero_bit 2-3-0 13100 NULL
104862 +ttm_dma_pool_alloc_new_pages_13105 ttm_dma_pool_alloc_new_pages 3 13105 NULL
104863 +SyS_msgrcv_13109 SyS_msgrcv 3 13109 NULL
104864 +snd_rme96_playback_copy_13111 snd_rme96_playback_copy 5 13111 NULL
104865 +xen_allocate_irq_dynamic_13116 xen_allocate_irq_dynamic 0 13116 NULL
104866 +bfad_debugfs_read_13119 bfad_debugfs_read 3 13119 NULL
104867 +blk_update_request_13146 blk_update_request 3 13146 NULL
104868 +caif_stream_recvmsg_13173 caif_stream_recvmsg 4 13173 NULL
104869 +pwr_disable_ps_read_13176 pwr_disable_ps_read 3 13176 NULL
104870 +ucs2_strlen_13178 ucs2_strlen 0 13178 NULL
104871 +dgrp_net_ioctl_13183 dgrp_net_ioctl 2 13183 NULL
104872 +create_trace_uprobe_13184 create_trace_uprobe 1 13184 NULL
104873 +__cmpxchg64_13187 __cmpxchg64 0 13187 NULL
104874 +comedi_read_13199 comedi_read 3 13199 NULL
104875 +mmc_ext_csd_read_13205 mmc_ext_csd_read 3 13205 NULL
104876 +__nodes_fold_13215 __nodes_fold 4 13215 NULL
104877 +get_unaligned_le64_13219 get_unaligned_le64 0 13219 NULL
104878 +svm_msrpm_offset_13220 svm_msrpm_offset 0-1 13220 NULL
104879 +fnic_trace_ctrl_write_13229 fnic_trace_ctrl_write 3 13229 NULL
104880 +asix_read_cmd_13245 asix_read_cmd 5 13245 NULL
104881 +kvm_lapic_enable_pv_eoi_13249 kvm_lapic_enable_pv_eoi 2 13249 NULL
104882 +init_tid_tabs_13252 init_tid_tabs 2-3-4 13252 NULL
104883 +bio_integrity_trim_13259 bio_integrity_trim 3 13259 NULL
104884 +simple_attr_write_13260 simple_attr_write 3 13260 NULL
104885 +c4iw_reg_user_mr_13269 c4iw_reg_user_mr 2-3 13269 NULL
104886 +pmcraid_notify_aen_13274 pmcraid_notify_aen 3 13274 NULL
104887 +il4965_stats_flag_13281 il4965_stats_flag 0-3 13281 NULL
104888 +lpfc_idiag_mbxacc_get_setup_13282 lpfc_idiag_mbxacc_get_setup 0 13282 NULL
104889 +sd_major_13294 sd_major 0-1 13294 NULL
104890 +kempld_read16_13297 kempld_read16 0 13297 NULL
104891 +reexecute_instruction_13321 reexecute_instruction 2 13321 NULL
104892 +__clone_and_map_data_bio_13334 __clone_and_map_data_bio 4-8 13334 NULL
104893 +kvm_read_nested_guest_page_13337 kvm_read_nested_guest_page 5-2 13337 NULL
104894 +get_bits_13353 get_bits 0-2 13353 NULL
104895 +hscx_empty_fifo_13360 hscx_empty_fifo 2 13360 NULL
104896 +iso_sched_alloc_13377 iso_sched_alloc 1 13377 NULL nohasharray
104897 +wep_key_not_found_read_13377 wep_key_not_found_read 3 13377 &iso_sched_alloc_13377
104898 +ext4_meta_trans_blocks_13380 ext4_meta_trans_blocks 0-3-2 13380 NULL
104899 +lov_mds_md_size_13388 lov_mds_md_size 0-1 13388 NULL nohasharray
104900 +dis_bypass_write_13388 dis_bypass_write 3 13388 &lov_mds_md_size_13388
104901 +BcmSetActiveSection_13389 BcmSetActiveSection 0 13389 NULL
104902 +ocfs2_inode_lock_update_13414 ocfs2_inode_lock_update 0 13414 NULL
104903 +netxen_alloc_sds_rings_13417 netxen_alloc_sds_rings 2 13417 NULL
104904 +compat_SyS_sendfile64_13420 compat_SyS_sendfile64 4 13420 NULL
104905 +keyring_read_13438 keyring_read 3 13438 NULL
104906 +sctp_setsockopt_peer_primary_addr_13440 sctp_setsockopt_peer_primary_addr 3 13440 NULL
104907 +ath6kl_cfg80211_connect_event_13443 ath6kl_cfg80211_connect_event 8-9-7 13443 NULL
104908 +sb_init_dio_done_wq_13482 sb_init_dio_done_wq 0 13482 NULL
104909 +data_read_13494 data_read 3 13494 NULL
104910 +i915_switch_context_13498 i915_switch_context 0 13498 NULL
104911 +ioat_chansts_32_13506 ioat_chansts_32 0 13506 NULL
104912 +ocfs2_align_bytes_to_blocks_13512 ocfs2_align_bytes_to_blocks 2-0 13512 NULL
104913 +core_status_13515 core_status 4 13515 NULL
104914 +smk_write_mapped_13519 smk_write_mapped 3 13519 NULL
104915 +bm_init_13529 bm_init 2 13529 NULL
104916 +kvm_get_cr8_13538 kvm_get_cr8 0 13538 NULL
104917 +SYSC_remap_file_pages_13540 SYSC_remap_file_pages 1-2 13540 NULL nohasharray
104918 +non_atomic_pte_lookup_13540 non_atomic_pte_lookup 2 13540 &SYSC_remap_file_pages_13540
104919 +__btrfs_alloc_chunk_13554 __btrfs_alloc_chunk 0 13554 NULL
104920 +llcp_sock_recvmsg_13556 llcp_sock_recvmsg 4 13556 NULL
104921 +ieee80211_if_read_ap_power_level_13558 ieee80211_if_read_ap_power_level 3 13558 NULL
104922 +ubifs_get_idx_gc_leb_13566 ubifs_get_idx_gc_leb 0 13566 NULL
104923 +read_file_antenna_13574 read_file_antenna 3 13574 NULL
104924 +cache_write_13589 cache_write 3 13589 NULL
104925 +Rd_Indx_13602 Rd_Indx 3-2 13602 NULL
104926 +swap_cgroup_swapon_13614 swap_cgroup_swapon 2 13614 NULL
104927 +wm8994_bulk_write_13615 wm8994_bulk_write 2-3 13615 NULL
104928 +pmcraid_get_minor_13619 pmcraid_get_minor 0 13619 NULL
104929 +packet_snd_13634 packet_snd 3 13634 NULL
104930 +blk_msg_write_13655 blk_msg_write 3 13655 NULL
104931 +cache_downcall_13666 cache_downcall 3 13666 NULL
104932 +ext3_xattr_list_entries_13682 ext3_xattr_list_entries 0-4 13682 NULL
104933 +nv94_aux_13689 nv94_aux 2-5 13689 NULL
104934 +usb_get_string_13693 usb_get_string 0 13693 NULL
104935 +atomic_cmpxchg_13700 atomic_cmpxchg 0 13700 NULL
104936 +fw_iso_buffer_alloc_13704 fw_iso_buffer_alloc 2 13704 NULL
104937 +ocfs2_cache_block_dealloc_13731 ocfs2_cache_block_dealloc 0 13731 NULL
104938 +netdev_queue_numa_node_read_13732 netdev_queue_numa_node_read 0 13732 NULL
104939 +audit_unpack_string_13748 audit_unpack_string 3 13748 NULL
104940 +ufs_dtog_13750 ufs_dtog 0-2 13750 NULL
104941 +ieee802154_alloc_device_13767 ieee802154_alloc_device 1 13767 NULL
104942 +fb_sys_read_13778 fb_sys_read 3 13778 NULL
104943 +ath6kl_mgmt_powersave_ap_13791 ath6kl_mgmt_powersave_ap 6 13791 NULL
104944 +random_read_13815 random_read 3 13815 NULL
104945 +mutex_lock_interruptible_nested_13817 mutex_lock_interruptible_nested 0 13817 NULL
104946 +hsi_register_board_info_13820 hsi_register_board_info 2 13820 NULL
104947 +___mei_cl_send_13821 ___mei_cl_send 3 13821 NULL
104948 +enc_pools_insert_13849 enc_pools_insert 3 13849 NULL
104949 +evdev_ioctl_compat_13851 evdev_ioctl_compat 2 13851 NULL
104950 +compat_ip_setsockopt_13870 compat_ip_setsockopt 5 13870 NULL
104951 +btrfs_insert_empty_item_13885 btrfs_insert_empty_item 0 13885 NULL
104952 +qp_memcpy_to_queue_13886 qp_memcpy_to_queue 5-2 13886 NULL nohasharray
104953 +__fsnotify_parent_13886 __fsnotify_parent 0 13886 &qp_memcpy_to_queue_13886
104954 +window_alignment_13895 window_alignment 0 13895 NULL
104955 +snd_pcm_aio_read_13900 snd_pcm_aio_read 3 13900 NULL
104956 +is_inode_existent_13913 is_inode_existent 0 13913 NULL
104957 +cfg80211_inform_bss_width_13933 cfg80211_inform_bss_width 9 13933 NULL
104958 +ext3_xattr_block_get_13936 ext3_xattr_block_get 0 13936 NULL
104959 +ieee80211_if_read_dot11MeshForwarding_13940 ieee80211_if_read_dot11MeshForwarding 3 13940 NULL nohasharray
104960 +ocfs2_xa_value_truncate_13940 ocfs2_xa_value_truncate 2 13940 &ieee80211_if_read_dot11MeshForwarding_13940
104961 +iwl_dbgfs_protection_mode_read_13943 iwl_dbgfs_protection_mode_read 3 13943 NULL
104962 +ieee80211_if_read_min_discovery_timeout_13946 ieee80211_if_read_min_discovery_timeout 3 13946 NULL
104963 +qib_mmap_mem_13947 qib_mmap_mem 3 13947 NULL
104964 +lpfc_idiag_queacc_read_13950 lpfc_idiag_queacc_read 3 13950 NULL
104965 +osc_grant_shrink_interval_seq_write_13952 osc_grant_shrink_interval_seq_write 3 13952 NULL
104966 +ocfs2_refresh_slot_info_13957 ocfs2_refresh_slot_info 0 13957 NULL
104967 +snd_pcm_plug_slave_size_13967 snd_pcm_plug_slave_size 0-2 13967 NULL
104968 +qcam_read_13977 qcam_read 3 13977 NULL
104969 +dsp_read_13980 dsp_read 2 13980 NULL
104970 +bm_block_bits_13981 bm_block_bits 0 13981 NULL nohasharray
104971 +dvb_demux_read_13981 dvb_demux_read 3 13981 &bm_block_bits_13981
104972 +create_files_14003 create_files 0 14003 NULL
104973 +sddr09_write_data_14014 sddr09_write_data 3 14014 NULL
104974 +btrfs_get_blocks_direct_14016 btrfs_get_blocks_direct 2 14016 NULL
104975 +dmi_format_ids_14018 dmi_format_ids 2 14018 NULL
104976 +iscsi_create_flashnode_conn_14022 iscsi_create_flashnode_conn 4 14022 NULL
104977 +dvb_usercopy_14036 dvb_usercopy 2 14036 NULL
104978 +read_def_modal_eeprom_14041 read_def_modal_eeprom 3 14041 NULL
104979 +ieee80211_if_fmt_aid_14055 ieee80211_if_fmt_aid 3 14055 NULL
104980 +sta_agg_status_read_14058 sta_agg_status_read 3 14058 NULL
104981 +ovs_flow_actions_alloc_14072 ovs_flow_actions_alloc 1 14072 NULL
104982 +lov_stripeoffset_seq_write_14078 lov_stripeoffset_seq_write 3 14078 NULL
104983 +do_proc_readlink_14096 do_proc_readlink 3 14096 NULL
104984 +compat_sys_pselect6_14105 compat_sys_pselect6 1 14105 NULL
104985 +intel_ring_wait_seqno_14107 intel_ring_wait_seqno 0 14107 NULL
104986 +ext4_journal_blocks_per_page_14127 ext4_journal_blocks_per_page 0 14127 NULL
104987 +ntfs_rl_replace_14136 ntfs_rl_replace 4-2 14136 NULL
104988 +qfq_ffs_14139 qfq_ffs 2 14139 NULL
104989 +isku_sysfs_read_light_14140 isku_sysfs_read_light 6 14140 NULL
104990 +em_canid_change_14150 em_canid_change 3 14150 NULL
104991 +gsm_dlci_data_14155 gsm_dlci_data 3 14155 NULL
104992 +print_input_mask_14168 print_input_mask 3-0 14168 NULL
104993 +ocfs2_split_and_insert_14171 ocfs2_split_and_insert 0 14171 NULL
104994 +ocfs2_xattr_value_truncate_14183 ocfs2_xattr_value_truncate 3 14183 NULL
104995 +datafab_read_data_14186 datafab_read_data 4 14186 NULL
104996 +do_splice_14196 do_splice 5 14196 NULL
104997 +hfsplus_brec_find_14200 hfsplus_brec_find 0 14200 NULL
104998 +alloc_async_14208 alloc_async 1 14208 NULL
104999 +ath6kl_regread_write_14220 ath6kl_regread_write 3 14220 NULL
105000 +ieee80211_if_write_uapsd_max_sp_len_14233 ieee80211_if_write_uapsd_max_sp_len 3 14233 NULL
105001 +dma_declare_coherent_memory_14244 dma_declare_coherent_memory 4 14244 NULL
105002 +snd_soc_hw_bulk_write_raw_14245 snd_soc_hw_bulk_write_raw 2-4 14245 NULL
105003 +ext4_journal_restart_14251 ext4_journal_restart 0 14251 NULL
105004 +will_overwrite_ref_14252 will_overwrite_ref 0 14252 NULL
105005 +ath6kl_connect_event_14267 ath6kl_connect_event 7-8-9 14267 NULL
105006 +numa_mem_id_14279 numa_mem_id 0 14279 NULL
105007 +rr_status_14293 rr_status 5 14293 NULL
105008 +read_default_ldt_14302 read_default_ldt 2 14302 NULL
105009 +__readl_14308 __readl 0 14308 NULL
105010 +send_chown_14309 send_chown 0 14309 NULL
105011 +i915_gem_object_finish_gpu_14312 i915_gem_object_finish_gpu 0 14312 NULL
105012 +oo_objects_14319 oo_objects 0 14319 NULL
105013 +ww_mutex_deadlock_injection_14321 ww_mutex_deadlock_injection 0 14321 NULL
105014 +ll_get_user_pages_14328 ll_get_user_pages 2-3-0 14328 NULL
105015 +down_write_failed_14331 down_write_failed 2 14331 NULL
105016 +p9_client_zc_rpc_14345 p9_client_zc_rpc 7 14345 NULL
105017 +alloc_tx_struct_14349 alloc_tx_struct 1 14349 NULL
105018 +snd_pcm_lib_readv_14363 snd_pcm_lib_readv 0-3 14363 NULL
105019 +btrfs_set_inode_index_14379 btrfs_set_inode_index 0 14379 NULL
105020 +acpi_get_override_irq_14381 acpi_get_override_irq 1 14381 NULL
105021 +ath6kl_regdump_read_14393 ath6kl_regdump_read 3 14393 NULL
105022 +smk_write_onlycap_14400 smk_write_onlycap 3 14400 NULL
105023 +first_logical_byte_14403 first_logical_byte 0 14403 NULL
105024 +mtd_concat_create_14416 mtd_concat_create 2 14416 NULL
105025 +get_kcore_size_14425 get_kcore_size 0 14425 NULL
105026 +qib_user_sdma_page_length_14432 qib_user_sdma_page_length 0-2-1 14432 NULL
105027 +check_lpt_crc_14442 check_lpt_crc 0 14442 NULL
105028 +block_size_14443 block_size 0 14443 NULL
105029 +lmv_user_md_size_14456 lmv_user_md_size 0-1 14456 NULL
105030 +snd_emu10k1_proc_spdif_status_14457 snd_emu10k1_proc_spdif_status 4-5 14457 NULL
105031 +lustre_msg_size_v2_14470 lustre_msg_size_v2 0 14470 NULL
105032 +dma_transfer_size_14473 dma_transfer_size 0 14473 NULL
105033 +udplite_getfrag_14479 udplite_getfrag 3-4 14479 NULL
105034 +efx_mdio_check_mmds_14486 efx_mdio_check_mmds 2 14486 NULL nohasharray
105035 +ieee80211_if_read_dot11MeshGateAnnouncementProtocol_14486 ieee80211_if_read_dot11MeshGateAnnouncementProtocol 3 14486 &efx_mdio_check_mmds_14486
105036 +ocfs2_debug_read_14507 ocfs2_debug_read 3 14507 NULL
105037 +ep0_write_14536 ep0_write 3 14536 NULL nohasharray
105038 +dataflash_read_user_otp_14536 dataflash_read_user_otp 3-2 14536 &ep0_write_14536
105039 +register_trace_sched_switch_14545 register_trace_sched_switch 0 14545 NULL
105040 +picolcd_debug_eeprom_read_14549 picolcd_debug_eeprom_read 3 14549 NULL
105041 +drm_vmalloc_dma_14550 drm_vmalloc_dma 1 14550 NULL
105042 +qp_host_alloc_queue_14566 qp_host_alloc_queue 1 14566 NULL
105043 +SyS_setdomainname_14569 SyS_setdomainname 2 14569 NULL
105044 +remap_to_origin_then_cache_14583 remap_to_origin_then_cache 3 14583 NULL
105045 +idmap_pipe_downcall_14591 idmap_pipe_downcall 3 14591 NULL
105046 +ceph_osdc_alloc_request_14597 ceph_osdc_alloc_request 3 14597 NULL
105047 +ocfs2_steal_meta_14602 ocfs2_steal_meta 0 14602 NULL
105048 +ocfs2_trim_group_14641 ocfs2_trim_group 4-3 14641 NULL
105049 +dbJoin_14644 dbJoin 0 14644 NULL
105050 +profile_replace_14652 profile_replace 3 14652 NULL
105051 +add_to_page_cache_locked_14668 add_to_page_cache_locked 0 14668 NULL
105052 +min_bytes_needed_14675 min_bytes_needed 0 14675 NULL
105053 +nvme_trans_log_info_exceptions_14677 nvme_trans_log_info_exceptions 3 14677 NULL
105054 +pipeline_enc_tx_stat_fifo_int_read_14680 pipeline_enc_tx_stat_fifo_int_read 3 14680 NULL
105055 +ieee80211_if_fmt_rc_rateidx_mask_2ghz_14683 ieee80211_if_fmt_rc_rateidx_mask_2ghz 3 14683 NULL
105056 +SyS_fsetxattr_14702 SyS_fsetxattr 4 14702 NULL
105057 +persistent_ram_ecc_string_14704 persistent_ram_ecc_string 0 14704 NULL
105058 +u_audio_playback_14709 u_audio_playback 3 14709 NULL
105059 +rtw_cbuf_alloc_14710 rtw_cbuf_alloc 1 14710 NULL
105060 +cgroup_path_14713 cgroup_path 3 14713 NULL
105061 +get_bio_block_14714 get_bio_block 0 14714 NULL
105062 +vfd_write_14717 vfd_write 3 14717 NULL
105063 +SyS_sendfile_14718 SyS_sendfile 4 14718 NULL
105064 +__blk_end_request_14729 __blk_end_request 3 14729 NULL
105065 +raid1_resize_14740 raid1_resize 2 14740 NULL
105066 +i915_error_state_buf_init_14742 i915_error_state_buf_init 2 14742 NULL
105067 +btrfs_inode_extref_name_len_14752 btrfs_inode_extref_name_len 0 14752 NULL
105068 +rx_rx_cmplt_read_14753 rx_rx_cmplt_read 3 14753 NULL
105069 +regmap_range_read_file_14775 regmap_range_read_file 3 14775 NULL
105070 +sta_dev_read_14782 sta_dev_read 3 14782 NULL
105071 +keys_proc_write_14792 keys_proc_write 3 14792 NULL
105072 +ext4_kvmalloc_14796 ext4_kvmalloc 1 14796 NULL
105073 +__kfifo_in_14797 __kfifo_in 3-0 14797 NULL
105074 +hpet_readl_14801 hpet_readl 0 14801 NULL nohasharray
105075 +snd_als300_gcr_read_14801 snd_als300_gcr_read 0 14801 &hpet_readl_14801
105076 +changed_cb_14819 changed_cb 0 14819 NULL
105077 +do_tune_cpucache_14828 do_tune_cpucache 2 14828 NULL
105078 +mrp_attr_create_14853 mrp_attr_create 3 14853 NULL
105079 +lcd_write_14857 lcd_write 3 14857 NULL
105080 +get_user_cpu_mask_14861 get_user_cpu_mask 2 14861 NULL
105081 +gmux_index_read8_14890 gmux_index_read8 0 14890 NULL
105082 +acpi_os_allocate_14892 acpi_os_allocate 1 14892 NULL
105083 +SYSC_readv_14901 SYSC_readv 3 14901 NULL
105084 +regmap_irq_get_virq_14910 regmap_irq_get_virq 2-0 14910 NULL
105085 +__arch_hweight64_14923 __arch_hweight64 0 14923 NULL nohasharray
105086 +qp_memcpy_to_queue_iov_14923 qp_memcpy_to_queue_iov 5-2 14923 &__arch_hweight64_14923
105087 +ocfs2_expand_nonsparse_inode_14936 ocfs2_expand_nonsparse_inode 3-4 14936 NULL
105088 +range_to_mtrr_14940 range_to_mtrr 2 14940 NULL
105089 +queue_cnt_14951 queue_cnt 0 14951 NULL
105090 +unix_dgram_recvmsg_14952 unix_dgram_recvmsg 4 14952 NULL
105091 +i915_vma_unbind_14954 i915_vma_unbind 0 14954 NULL
105092 +videobuf_read_stream_14956 videobuf_read_stream 3 14956 NULL
105093 +mce_flush_rx_buffer_14976 mce_flush_rx_buffer 2 14976 NULL
105094 +setkey_14987 setkey 3 14987 NULL nohasharray
105095 +gpio_twl4030_write_14987 gpio_twl4030_write 1 14987 &setkey_14987
105096 +xfs_dinode_size_14996 xfs_dinode_size 0 14996 NULL
105097 +blk_integrity_tuple_size_15027 blk_integrity_tuple_size 0 15027 NULL
105098 +irq_get_next_irq_15053 irq_get_next_irq 1-0 15053 NULL
105099 +cld_pipe_downcall_15058 cld_pipe_downcall 3 15058 NULL
105100 +ieee80211_if_read_uapsd_max_sp_len_15067 ieee80211_if_read_uapsd_max_sp_len 3 15067 NULL
105101 +nfs4_write_cached_acl_15070 nfs4_write_cached_acl 4 15070 NULL
105102 +ntfs_copy_from_user_15072 ntfs_copy_from_user 3-5-0 15072 NULL
105103 +pppoe_recvmsg_15073 pppoe_recvmsg 4 15073 NULL
105104 +ceph_calc_ceph_pg_15075 ceph_calc_ceph_pg 0 15075 NULL
105105 +perf_trace_sched_stat_runtime_15115 perf_trace_sched_stat_runtime 3 15115 NULL
105106 +hex_dump_to_buffer_15121 hex_dump_to_buffer 6 15121 NULL
105107 +start_port_15124 start_port 0 15124 NULL
105108 +ipwireless_ppp_mru_15153 ipwireless_ppp_mru 0 15153 NULL
105109 +iwl_dbgfs_sta_drain_write_15167 iwl_dbgfs_sta_drain_write 3 15167 NULL
105110 +self_check_not_bad_15175 self_check_not_bad 0 15175 NULL
105111 +SYSC_setdomainname_15180 SYSC_setdomainname 2 15180 NULL
105112 +iscsi_create_endpoint_15193 iscsi_create_endpoint 1 15193 NULL
105113 +reserve_resources_15194 reserve_resources 3 15194 NULL
105114 +mtt_alloc_res_15211 mtt_alloc_res 5 15211 NULL
105115 +bfad_debugfs_write_regrd_15218 bfad_debugfs_write_regrd 3 15218 NULL
105116 +iwl_dbgfs_sram_write_15239 iwl_dbgfs_sram_write 3 15239 NULL
105117 +il_dbgfs_rx_stats_read_15243 il_dbgfs_rx_stats_read 3 15243 NULL
105118 +div64_u64_15263 div64_u64 0-1-2 15263 NULL
105119 +simple_strtol_15273 simple_strtol 0 15273 NULL
105120 +fw_realloc_buffer_15280 fw_realloc_buffer 2 15280 NULL
105121 +arch_enable_uv_irq_15294 arch_enable_uv_irq 2 15294 NULL
105122 +acpi_ev_create_gpe_block_15297 acpi_ev_create_gpe_block 5 15297 NULL
105123 +ocfs2_read_refcount_block_15305 ocfs2_read_refcount_block 0 15305 NULL
105124 +__ocfs2_remove_xattr_range_15330 __ocfs2_remove_xattr_range 4-3-5 15330 NULL
105125 +bfloat_mantissa_15334 bfloat_mantissa 0 15334 NULL
105126 +xlog_ticket_alloc_15335 xlog_ticket_alloc 2 15335 NULL
105127 +kovaplus_sysfs_read_15337 kovaplus_sysfs_read 6 15337 NULL
105128 +ioread16_15342 ioread16 0 15342 NULL
105129 +ept_prefetch_gpte_15348 ept_prefetch_gpte 4 15348 NULL
105130 +acpi_ut_create_string_object_15360 acpi_ut_create_string_object 1 15360 NULL
105131 +count_inode_extrefs_15366 count_inode_extrefs 0 15366 NULL
105132 +ext4_direct_IO_15369 ext4_direct_IO 4 15369 NULL
105133 +graph_depth_read_15371 graph_depth_read 3 15371 NULL
105134 +compat_sys_process_vm_readv_15374 compat_sys_process_vm_readv 3-5 15374 NULL
105135 +fq_codel_zalloc_15378 fq_codel_zalloc 1 15378 NULL
105136 +domain_flush_pages_15379 domain_flush_pages 2-3 15379 NULL
105137 +alloc_fddidev_15382 alloc_fddidev 1 15382 NULL
105138 +btrfs_level_size_15392 btrfs_level_size 0 15392 NULL
105139 +pipeline_csum_to_rx_xfer_swi_read_15403 pipeline_csum_to_rx_xfer_swi_read 3 15403 NULL
105140 +get_modalias_15406 get_modalias 2 15406 NULL
105141 +blockdev_direct_IO_15408 blockdev_direct_IO 0-5 15408 NULL
105142 +dm_cache_resize_15422 dm_cache_resize 2 15422 NULL
105143 +__videobuf_copy_to_user_15423 __videobuf_copy_to_user 4-0 15423 NULL
105144 +tcp_mtu_to_mss_15438 tcp_mtu_to_mss 2-0 15438 NULL
105145 +hpsa_change_queue_depth_15449 hpsa_change_queue_depth 2 15449 NULL
105146 +memweight_15450 memweight 2 15450 NULL
105147 +vmalloc_15464 vmalloc 1 15464 NULL
105148 +__mutex_lock_killable_slowpath_15472 __mutex_lock_killable_slowpath 0 15472 NULL
105149 +insert_old_idx_znode_15500 insert_old_idx_znode 0 15500 NULL
105150 +zd_chip_is_zd1211b_15518 zd_chip_is_zd1211b 0 15518 NULL
105151 +ifx_spi_write_15531 ifx_spi_write 3 15531 NULL
105152 +da9052_bat_irq_15533 da9052_bat_irq 1 15533 NULL
105153 +p9_check_zc_errors_15534 p9_check_zc_errors 4 15534 NULL
105154 +xfrm_state_mtu_15548 xfrm_state_mtu 0-2 15548 NULL
105155 +snd_pcm_channel_info_15572 snd_pcm_channel_info 0 15572 NULL
105156 +persistent_status_15574 persistent_status 4 15574 NULL
105157 +bnx2fc_process_unsol_compl_15576 bnx2fc_process_unsol_compl 2 15576 NULL
105158 +cl_io_submit_sync_15579 cl_io_submit_sync 0 15579 NULL
105159 +vme_user_write_15587 vme_user_write 3 15587 NULL
105160 +ocfs2_truncate_rec_15595 ocfs2_truncate_rec 7-0 15595 NULL
105161 +sx150x_install_irq_chip_15609 sx150x_install_irq_chip 3 15609 NULL
105162 +iommu_device_max_index_15620 iommu_device_max_index 0-3-2-1 15620 NULL nohasharray
105163 +compat_fillonedir_15620 compat_fillonedir 3 15620 &iommu_device_max_index_15620
105164 +proc_loginuid_read_15631 proc_loginuid_read 3 15631 NULL
105165 +tomoyo_scan_bprm_15642 tomoyo_scan_bprm 4-2 15642 NULL nohasharray
105166 +sk_memory_allocated_add_15642 sk_memory_allocated_add 2 15642 &tomoyo_scan_bprm_15642 nohasharray
105167 +pipeline_hs_tx_stat_fifo_int_read_15642 pipeline_hs_tx_stat_fifo_int_read 3 15642 &sk_memory_allocated_add_15642
105168 +joydev_handle_JSIOCSBTNMAP_15643 joydev_handle_JSIOCSBTNMAP 3 15643 NULL
105169 +fs_path_add_15648 fs_path_add 3-0 15648 NULL
105170 +xsd_read_15653 xsd_read 3 15653 NULL
105171 +unix_bind_15668 unix_bind 3 15668 NULL
105172 +SyS_connect_15674 SyS_connect 3 15674 NULL nohasharray
105173 +dm_read_15674 dm_read 3 15674 &SyS_connect_15674
105174 +pstore_mkfile_15675 pstore_mkfile 6 15675 NULL
105175 +i915_gem_object_set_to_cpu_domain_15705 i915_gem_object_set_to_cpu_domain 0 15705 NULL nohasharray
105176 +uncore_alloc_box_15705 uncore_alloc_box 2 15705 &i915_gem_object_set_to_cpu_domain_15705
105177 +ocfs2_split_tree_15716 ocfs2_split_tree 5-0 15716 NULL
105178 +tracing_snapshot_write_15719 tracing_snapshot_write 3 15719 NULL
105179 +HiSax_readstatus_15752 HiSax_readstatus 2 15752 NULL
105180 +ftrace_profile_init_cpu_15761 ftrace_profile_init_cpu 0 15761 NULL
105181 +bitmap_search_next_usable_block_15762 bitmap_search_next_usable_block 3-1-0 15762 NULL
105182 +i915_gem_init_seqno_15793 i915_gem_init_seqno 0 15793 NULL
105183 +smk_read_direct_15803 smk_read_direct 3 15803 NULL
105184 +nameseq_list_15817 nameseq_list 3-0 15817 NULL nohasharray
105185 +gnttab_expand_15817 gnttab_expand 1 15817 &nameseq_list_15817
105186 +afs_proc_rootcell_write_15822 afs_proc_rootcell_write 3 15822 NULL
105187 +brcmf_sdbrcm_died_dump_15841 brcmf_sdbrcm_died_dump 3 15841 NULL
105188 +table_size_15851 table_size 0-1-2 15851 NULL
105189 +ubi_io_write_15870 ubi_io_write 0-5-4 15870 NULL nohasharray
105190 +media_entity_init_15870 media_entity_init 2-4 15870 &ubi_io_write_15870
105191 +__mptctl_ioctl_15875 __mptctl_ioctl 2 15875 NULL
105192 +profile_pc_15887 profile_pc 0 15887 NULL
105193 +nfs_map_group_to_gid_15892 nfs_map_group_to_gid 3 15892 NULL
105194 +native_read_msr_15905 native_read_msr 0 15905 NULL
105195 +parse_audio_stream_data_15937 parse_audio_stream_data 3 15937 NULL
105196 +power_read_15939 power_read 3 15939 NULL
105197 +lpfc_idiag_drbacc_read_15948 lpfc_idiag_drbacc_read 3 15948 NULL
105198 +snd_pcm_lib_read_transfer_15952 snd_pcm_lib_read_transfer 5-2-4 15952 NULL
105199 +tfrc_calc_x_15975 tfrc_calc_x 2-1 15975 NULL
105200 +frame_alloc_15981 frame_alloc 4 15981 NULL
105201 +hdpvr_register_videodev_16010 hdpvr_register_videodev 3 16010 NULL
105202 +viafb_vt1636_proc_write_16018 viafb_vt1636_proc_write 3 16018 NULL
105203 +i915_gem_object_pin_16032 i915_gem_object_pin 0 16032 NULL
105204 +dccp_recvmsg_16056 dccp_recvmsg 4 16056 NULL
105205 +read_file_spectral_period_16057 read_file_spectral_period 3 16057 NULL
105206 +si5351_msynth_params_address_16062 si5351_msynth_params_address 0-1 16062 NULL
105207 +ocfs2_sync_local_to_main_16076 ocfs2_sync_local_to_main 0 16076 NULL
105208 +isr_tx_exch_complete_read_16103 isr_tx_exch_complete_read 3 16103 NULL
105209 +dma_tx_requested_read_16110 dma_tx_requested_read 3 16110 NULL nohasharray
105210 +isr_hw_pm_mode_changes_read_16110 isr_hw_pm_mode_changes_read 3 16110 &dma_tx_requested_read_16110
105211 +irq_set_chip_and_handler_name_16111 irq_set_chip_and_handler_name 1 16111 NULL
105212 +snd_dma_pointer_16126 snd_dma_pointer 0-2 16126 NULL
105213 +compat_sys_select_16131 compat_sys_select 1 16131 NULL
105214 +fsm_init_16134 fsm_init 2 16134 NULL
105215 +ext4_xattr_block_get_16148 ext4_xattr_block_get 0 16148 NULL
105216 +update_block_group_16155 update_block_group 0 16155 NULL
105217 +optimal_reclaimed_pages_16172 optimal_reclaimed_pages 0 16172 NULL
105218 +mapping_level_16188 mapping_level 2-0 16188 NULL
105219 +i40e_allocate_virt_mem_d_16191 i40e_allocate_virt_mem_d 3 16191 NULL
105220 +tcp_syn_options_16197 tcp_syn_options 0 16197 NULL
105221 +ath10k_htt_rx_ring_size_16201 ath10k_htt_rx_ring_size 0 16201 NULL
105222 +cipso_v4_map_cat_rng_hton_16203 cipso_v4_map_cat_rng_hton 0 16203 NULL
105223 +SyS_pselect6_16210 SyS_pselect6 1 16210 NULL
105224 +create_table_16213 create_table 2 16213 NULL
105225 +atomic_read_file_16227 atomic_read_file 3 16227 NULL
105226 +BcmGetSectionValStartOffset_16235 BcmGetSectionValStartOffset 0 16235 NULL
105227 +lov_prep_brw_set_16246 lov_prep_brw_set 3 16246 NULL
105228 +i40e_dbg_dump_read_16247 i40e_dbg_dump_read 3 16247 NULL nohasharray
105229 +btrfs_dev_extent_chunk_offset_16247 btrfs_dev_extent_chunk_offset 0 16247 &i40e_dbg_dump_read_16247
105230 +il_dbgfs_disable_ht40_write_16249 il_dbgfs_disable_ht40_write 3 16249 NULL
105231 +SyS_fgetxattr_16254 SyS_fgetxattr 4 16254 NULL
105232 +reiserfs_acl_count_16265 reiserfs_acl_count 0-1 16265 NULL
105233 +mq_force_mapping_16277 mq_force_mapping 2 16277 NULL
105234 +ocfs2_xattr_bucket_value_truncate_16279 ocfs2_xattr_bucket_value_truncate 4 16279 NULL
105235 +drbd_setsockopt_16280 drbd_setsockopt 5 16280 NULL nohasharray
105236 +nand_bch_init_16280 nand_bch_init 2-3 16280 &drbd_setsockopt_16280
105237 +account_16283 account 0-4-2 16283 NULL nohasharray
105238 +mirror_status_16283 mirror_status 5 16283 &account_16283
105239 +retry_instruction_16285 retry_instruction 2 16285 NULL
105240 +jumpshot_read_data_16287 jumpshot_read_data 4 16287 NULL
105241 +mo_xattr_get_16288 mo_xattr_get 0 16288 NULL
105242 +stk_allocate_buffers_16291 stk_allocate_buffers 2 16291 NULL
105243 +rbd_segment_offset_16293 rbd_segment_offset 0-2 16293 NULL
105244 +tfrc_invert_loss_event_rate_16295 tfrc_invert_loss_event_rate 1 16295 NULL
105245 +rsc_mgr_init_16299 rsc_mgr_init 3 16299 NULL
105246 +wb_map_16301 wb_map 2 16301 NULL
105247 +kvm_handle_hva_range_16312 kvm_handle_hva_range 3-2 16312 NULL
105248 +ext4_blocks_count_16320 ext4_blocks_count 0 16320 NULL
105249 +vmw_cursor_update_image_16332 vmw_cursor_update_image 3-4 16332 NULL
105250 +btrfs_insert_item_16357 btrfs_insert_item 0 16357 NULL
105251 +sysfs_create_groups_16360 sysfs_create_groups 0 16360 NULL
105252 +total_ps_buffered_read_16365 total_ps_buffered_read 3 16365 NULL
105253 +iscsi_tcp_conn_setup_16376 iscsi_tcp_conn_setup 2 16376 NULL
105254 +diva_os_malloc_16406 diva_os_malloc 2 16406 NULL
105255 +ieee80211_if_read_tsf_16420 ieee80211_if_read_tsf 3 16420 NULL
105256 +rxrpc_server_keyring_16431 rxrpc_server_keyring 3 16431 NULL
105257 +__bio_add_page_16435 __bio_add_page 0-4 16435 NULL
105258 +btrfs_truncate_inode_items_16452 btrfs_truncate_inode_items 0-4 16452 NULL
105259 +ocfs2_expand_refcount_tree_16455 ocfs2_expand_refcount_tree 0 16455 NULL
105260 +netlink_change_ngroups_16457 netlink_change_ngroups 2 16457 NULL
105261 +alloc_disk_node_16458 alloc_disk_node 2 16458 NULL
105262 +req_capsule_get_size_16467 req_capsule_get_size 0 16467 NULL
105263 +ocfs2_block_group_set_bits_16488 ocfs2_block_group_set_bits 0 16488 NULL
105264 +add_qgroup_item_16492 add_qgroup_item 0 16492 NULL
105265 +tracing_readme_read_16493 tracing_readme_read 3 16493 NULL
105266 +filemap_write_and_wait_16506 filemap_write_and_wait 0 16506 NULL
105267 +start_this_handle_16519 start_this_handle 0 16519 NULL
105268 +snd_interval_max_16529 snd_interval_max 0 16529 NULL
105269 +raid10_resize_16537 raid10_resize 2 16537 NULL
105270 +lpfc_debugfs_read_16566 lpfc_debugfs_read 3 16566 NULL
105271 +perf_event_set_period_16574 perf_event_set_period 2-3 16574 NULL
105272 +agp_allocate_memory_wrap_16576 agp_allocate_memory_wrap 1 16576 NULL
105273 +lustre_msg_hdr_size_v2_16589 lustre_msg_hdr_size_v2 0 16589 NULL
105274 +btrfs_insert_orphan_item_16602 btrfs_insert_orphan_item 0 16602 NULL
105275 +gmux_index_read32_16604 gmux_index_read32 0 16604 NULL
105276 +btrfs_end_transaction_16610 btrfs_end_transaction 0 16610 NULL
105277 +palmas_irq_get_virq_16613 palmas_irq_get_virq 2 16613 NULL
105278 +btrfs_wait_marked_extents_16615 btrfs_wait_marked_extents 0 16615 NULL
105279 +rtw_set_wpa_ie_16633 rtw_set_wpa_ie 3 16633 NULL
105280 +btrfs_get_token_32_16651 btrfs_get_token_32 0 16651 NULL
105281 +packet_recv_error_16669 packet_recv_error 3 16669 NULL
105282 +__proc_lnet_buffers_16717 __proc_lnet_buffers 5 16717 NULL
105283 +__copy_to_user_swizzled_16748 __copy_to_user_swizzled 3-4 16748 NULL
105284 +arcmsr_adjust_disk_queue_depth_16756 arcmsr_adjust_disk_queue_depth 2 16756 NULL
105285 +blk_rq_map_user_iov_16772 blk_rq_map_user_iov 5 16772 NULL
105286 +i2o_parm_issue_16790 i2o_parm_issue 0 16790 NULL
105287 +get_server_iovec_16804 get_server_iovec 2 16804 NULL
105288 +tipc_send2name_16809 tipc_send2name 6 16809 NULL
105289 +dm_vcalloc_16814 dm_vcalloc 1-2 16814 NULL
105290 +cache_grow_16818 cache_grow 3 16818 NULL
105291 +drm_malloc_ab_16831 drm_malloc_ab 1-2 16831 NULL
105292 +scsi_mode_sense_16835 scsi_mode_sense 5 16835 NULL
105293 +hfsplus_min_io_size_16859 hfsplus_min_io_size 0 16859 NULL
105294 +alloc_idx_lebs_16872 alloc_idx_lebs 2 16872 NULL
105295 +carl9170_debugfs_ampdu_state_read_16873 carl9170_debugfs_ampdu_state_read 3 16873 NULL
105296 +st_write_16874 st_write 3 16874 NULL
105297 +__kfifo_peek_n_16877 __kfifo_peek_n 0 16877 NULL
105298 +transport_init_session_tags_16878 transport_init_session_tags 1-2 16878 NULL
105299 +ext4_ext_zeroout_16895 ext4_ext_zeroout 0 16895 NULL
105300 +psb_unlocked_ioctl_16926 psb_unlocked_ioctl 2 16926 NULL nohasharray
105301 +snd_gf1_mem_proc_dump_16926 snd_gf1_mem_proc_dump 5 16926 &psb_unlocked_ioctl_16926
105302 +_sp2d_alloc_16944 _sp2d_alloc 3-2-1 16944 NULL
105303 +squashfs_read_table_16945 squashfs_read_table 3 16945 NULL
105304 +wrm_16966 wrm 0 16966 NULL
105305 +keyctl_instantiate_key_iov_16969 keyctl_instantiate_key_iov 3 16969 NULL
105306 +ocfs2_read_quota_phys_block_16990 ocfs2_read_quota_phys_block 0 16990 NULL
105307 +ceph_read_dir_17005 ceph_read_dir 3 17005 NULL
105308 +snd_mask_refine_first_17026 snd_mask_refine_first 0 17026 NULL
105309 +copy_counters_to_user_17027 copy_counters_to_user 5 17027 NULL
105310 +btrfs_unlink_inode_17043 btrfs_unlink_inode 0 17043 NULL
105311 +jffs2_trusted_setxattr_17048 jffs2_trusted_setxattr 4 17048 NULL
105312 +__arch_hweight32_17060 __arch_hweight32 0 17060 NULL
105313 +__copy_user_nocache_17065 __copy_user_nocache 0 17065 NULL
105314 +sddr55_read_data_17072 sddr55_read_data 4 17072 NULL
105315 +dvb_dvr_read_17073 dvb_dvr_read 3 17073 NULL
105316 +simple_transaction_read_17076 simple_transaction_read 3 17076 NULL
105317 +__kmalloc_reserve_17080 __kmalloc_reserve 3 17080 NULL
105318 +carl9170_debugfs_mem_usage_read_17084 carl9170_debugfs_mem_usage_read 3 17084 NULL
105319 +entry_length_17093 entry_length 0 17093 NULL
105320 +ocfs2_get_refcount_cpos_end_17113 ocfs2_get_refcount_cpos_end 0 17113 NULL
105321 +write_mem_17114 write_mem 3 17114 NULL
105322 +pvr2_hdw_state_report_17121 pvr2_hdw_state_report 3 17121 NULL
105323 +wrmaltWithLock_17139 wrmaltWithLock 0 17139 NULL
105324 +nouveau_instobj_create__17144 nouveau_instobj_create_ 4 17144 NULL
105325 +jumpshot_write_data_17151 jumpshot_write_data 4 17151 NULL
105326 +sep_read_17161 sep_read 3 17161 NULL
105327 +befs_nls2utf_17163 befs_nls2utf 3 17163 NULL
105328 +tx_tx_start_templates_read_17164 tx_tx_start_templates_read 3 17164 NULL
105329 +UniStrnlen_17169 UniStrnlen 0 17169 NULL
105330 +ocfs2_flock_handle_signal_17189 ocfs2_flock_handle_signal 0 17189 NULL nohasharray
105331 +access_remote_vm_17189 access_remote_vm 0-4-2 17189 &ocfs2_flock_handle_signal_17189 nohasharray
105332 +iwl_dbgfs_txfifo_flush_write_17189 iwl_dbgfs_txfifo_flush_write 3 17189 &access_remote_vm_17189
105333 +driver_state_read_17194 driver_state_read 3 17194 NULL nohasharray
105334 +iscsit_find_cmd_from_itt_or_dump_17194 iscsit_find_cmd_from_itt_or_dump 3 17194 &driver_state_read_17194
105335 +sync_request_17208 sync_request 2 17208 NULL
105336 +dn_recvmsg_17213 dn_recvmsg 4 17213 NULL
105337 +send_write_or_clone_17241 send_write_or_clone 0 17241 NULL
105338 +to_oblock_17254 to_oblock 0-1 17254 NULL
105339 +unpack_value_17259 unpack_value 1 17259 NULL
105340 +lprocfs_read_frac_helper_17261 lprocfs_read_frac_helper 0 17261 NULL
105341 +error_error_frame_cts_nul_flid_read_17262 error_error_frame_cts_nul_flid_read 3 17262 NULL
105342 +alloc_reserved_file_extent_17267 alloc_reserved_file_extent 0 17267 NULL
105343 +alloc_ep_17269 alloc_ep 1 17269 NULL
105344 +hw_test_and_write_17271 hw_test_and_write 3 17271 NULL
105345 +pg_read_17276 pg_read 3 17276 NULL
105346 +raw_recvmsg_17277 raw_recvmsg 4 17277 NULL
105347 +hmac_sha256_17278 hmac_sha256 2 17278 NULL
105348 +neigh_hash_grow_17283 neigh_hash_grow 2 17283 NULL
105349 +minstrel_stats_read_17290 minstrel_stats_read 3 17290 NULL
105350 +install_breakpoint_17292 install_breakpoint 4 17292 NULL
105351 +ieee80211_if_fmt_dot11MeshForwarding_17301 ieee80211_if_fmt_dot11MeshForwarding 3 17301 NULL
105352 +mb_cache_create_17307 mb_cache_create 2 17307 NULL
105353 +gnttab_map_frames_v2_17314 gnttab_map_frames_v2 2 17314 NULL
105354 +SYSC_pread64_17337 SYSC_pread64 3 17337 NULL
105355 +ieee80211_if_read_dot11MeshHWMPperrMinInterval_17346 ieee80211_if_read_dot11MeshHWMPperrMinInterval 3 17346 NULL
105356 +ath6kl_wmi_send_mgmt_cmd_17347 ath6kl_wmi_send_mgmt_cmd 7 17347 NULL
105357 +mdc_import_seq_write_17409 mdc_import_seq_write 3 17409 NULL
105358 +lpfc_debugfs_dif_err_write_17424 lpfc_debugfs_dif_err_write 3 17424 NULL
105359 +compat_sys_ppoll_17430 compat_sys_ppoll 2 17430 NULL
105360 +sta_connected_time_read_17435 sta_connected_time_read 3 17435 NULL
105361 +SYSC_fcntl_17441 SYSC_fcntl 3 17441 NULL
105362 +libcfs_ipif_enumerate_17445 libcfs_ipif_enumerate 0 17445 NULL
105363 +nla_get_u32_17455 nla_get_u32 0 17455 NULL
105364 +__ref_totlen_17461 __ref_totlen 0 17461 NULL
105365 +probe_kernel_write_17481 probe_kernel_write 3 17481 NULL
105366 +TSS_rawhmac_17486 TSS_rawhmac 3 17486 NULL
105367 +lookup_tree_block_ref_17494 lookup_tree_block_ref 0 17494 NULL
105368 +bitmap_pos_to_ord_17503 bitmap_pos_to_ord 3 17503 NULL
105369 +lbs_highrssi_write_17515 lbs_highrssi_write 3 17515 NULL
105370 +qp_free_res_17541 qp_free_res 5 17541 NULL
105371 +xlog_do_log_recovery_17550 xlog_do_log_recovery 3 17550 NULL
105372 +__copy_to_user_17551 __copy_to_user 3-0 17551 NULL
105373 +copy_from_user_17559 copy_from_user 3-0 17559 NULL
105374 +snd_pcm_action_lock_irq_17569 snd_pcm_action_lock_irq 0 17569 NULL
105375 +acpi_ut_create_package_object_17594 acpi_ut_create_package_object 1 17594 NULL
105376 +neigh_hash_alloc_17595 neigh_hash_alloc 1 17595 NULL
105377 +__inode_info_17603 __inode_info 0 17603 NULL
105378 +wm8994_gpio_to_irq_17604 wm8994_gpio_to_irq 2 17604 NULL
105379 +osst_execute_17607 osst_execute 7-6 17607 NULL
105380 +ocfs2_mark_extent_written_17615 ocfs2_mark_extent_written 6 17615 NULL
105381 +ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout_17618 ieee80211_if_read_dot11MeshHWMPactivePathToRootTimeout 3 17618 NULL
105382 +dma_map_page_17628 dma_map_page 0 17628 NULL
105383 +ocfs2_rotate_subtree_left_17634 ocfs2_rotate_subtree_left 0 17634 NULL
105384 +twl4030_set_gpio_direction_17645 twl4030_set_gpio_direction 1 17645 NULL
105385 +SYSC_migrate_pages_17657 SYSC_migrate_pages 2 17657 NULL
105386 +packet_setsockopt_17662 packet_setsockopt 5 17662 NULL nohasharray
105387 +ubi_io_read_data_17662 ubi_io_read_data 0 17662 &packet_setsockopt_17662
105388 +kernel_write_17665 kernel_write 3 17665 NULL
105389 +pwr_enable_ps_read_17686 pwr_enable_ps_read 3 17686 NULL
105390 +filemap_fdatawait_17688 filemap_fdatawait 0 17688 NULL
105391 +gfn_to_pfn_memslot_17693 gfn_to_pfn_memslot 2-0 17693 NULL
105392 +__einj_error_trigger_17707 __einj_error_trigger 0 17707 NULL nohasharray
105393 +venus_rename_17707 venus_rename 4-5 17707 &__einj_error_trigger_17707
105394 +exofs_read_lookup_dev_table_17733 exofs_read_lookup_dev_table 3 17733 NULL
105395 +sctpprobe_read_17741 sctpprobe_read 3 17741 NULL
105396 +mark_unsafe_pages_17759 mark_unsafe_pages 0 17759 NULL
105397 +dgap_do_fep_load_17765 dgap_do_fep_load 3 17765 NULL
105398 +dtf_read_run_17768 dtf_read_run 3 17768 NULL
105399 +brcmf_sdio_chip_verifynvram_17776 brcmf_sdio_chip_verifynvram 4 17776 NULL
105400 +hash_ipport6_expire_17784 hash_ipport6_expire 3 17784 NULL
105401 +perf_clock_17787 perf_clock 0 17787 NULL
105402 +ubifs_leb_change_17789 ubifs_leb_change 0-4 17789 NULL
105403 +get_unaligned_be64_17794 get_unaligned_be64 0 17794 NULL nohasharray
105404 +shrink_slab_node_17794 shrink_slab_node 3-4 17794 &get_unaligned_be64_17794
105405 +bkey_to_cacheline_17811 bkey_to_cacheline 0 17811 NULL
105406 +_snd_pcm_lib_alloc_vmalloc_buffer_17820 _snd_pcm_lib_alloc_vmalloc_buffer 2 17820 NULL
105407 +gnet_stats_copy_app_17821 gnet_stats_copy_app 3 17821 NULL
105408 +cipso_v4_gentag_rbm_17836 cipso_v4_gentag_rbm 0 17836 NULL
105409 +dm_stats_message_17863 dm_stats_message 5 17863 NULL
105410 +sisusb_send_bulk_msg_17864 sisusb_send_bulk_msg 3 17864 NULL
105411 +alloc_sja1000dev_17868 alloc_sja1000dev 1 17868 NULL
105412 +ray_cs_essid_proc_write_17875 ray_cs_essid_proc_write 3 17875 NULL
105413 +orinoco_set_key_17878 orinoco_set_key 7-5 17878 NULL nohasharray
105414 +i40e_align_l2obj_base_17878 i40e_align_l2obj_base 0-1 17878 &orinoco_set_key_17878
105415 +init_per_cpu_17880 init_per_cpu 1 17880 NULL
105416 +ieee80211_if_fmt_dot11MeshMaxPeerLinks_17883 ieee80211_if_fmt_dot11MeshMaxPeerLinks 3 17883 NULL
105417 +ieee80211_if_fmt_dot11MeshHWMPRootMode_17890 ieee80211_if_fmt_dot11MeshHWMPRootMode 3 17890 NULL
105418 +ocfs2_clusters_to_blocks_17896 ocfs2_clusters_to_blocks 0-2 17896 NULL
105419 +recover_head_17904 recover_head 3 17904 NULL
105420 +xfs_buf_associate_memory_17915 xfs_buf_associate_memory 3 17915 NULL
105421 +scsi_bufflen_17933 scsi_bufflen 0 17933 NULL
105422 +__mutex_lock_check_stamp_17947 __mutex_lock_check_stamp 0 17947 NULL
105423 +beacon_interval_write_17952 beacon_interval_write 3 17952 NULL
105424 +ufs_free_blocks_17963 ufs_free_blocks 3-2 17963 NULL nohasharray
105425 +tlv_put_u64_17963 tlv_put_u64 0 17963 &ufs_free_blocks_17963
105426 +calc_nr_buckets_17976 calc_nr_buckets 0 17976 NULL
105427 +ext4_ext_calc_credits_for_single_extent_17983 ext4_ext_calc_credits_for_single_extent 0-2 17983 NULL
105428 +smk_write_cipso_17989 smk_write_cipso 3 17989 NULL
105429 +gnttab_max_grant_frames_17993 gnttab_max_grant_frames 0 17993 NULL
105430 +ext4_num_overhead_clusters_18001 ext4_num_overhead_clusters 2 18001 NULL
105431 +pvr2_v4l2_read_18006 pvr2_v4l2_read 3 18006 NULL
105432 +alloc_rx_desc_ring_18016 alloc_rx_desc_ring 2 18016 NULL
105433 +fill_read_18019 fill_read 0 18019 NULL
105434 +o2hb_highest_node_18034 o2hb_highest_node 2-0 18034 NULL
105435 +__posix_lock_file_18035 __posix_lock_file 0 18035 NULL
105436 +ocfs2_cache_cluster_dealloc_18043 ocfs2_cache_cluster_dealloc 0 18043 NULL
105437 +cryptd_alloc_instance_18048 cryptd_alloc_instance 3-2 18048 NULL
105438 +find_next_inuse_18051 find_next_inuse 2-3-0 18051 NULL
105439 +ddebug_proc_write_18055 ddebug_proc_write 3 18055 NULL
105440 +lua_sysfs_read_18062 lua_sysfs_read 6 18062 NULL
105441 +fpregs_get_18066 fpregs_get 4 18066 NULL
105442 +kvm_read_guest_page_18074 kvm_read_guest_page 5-2 18074 NULL
105443 +SYSC_pselect6_18076 SYSC_pselect6 1 18076 NULL
105444 +SYSC_semtimedop_18091 SYSC_semtimedop 3 18091 NULL
105445 +mpi_alloc_18094 mpi_alloc 1 18094 NULL
105446 +hfs_direct_IO_18104 hfs_direct_IO 4 18104 NULL
105447 +dfs_file_read_18116 dfs_file_read 3 18116 NULL
105448 +svc_getnl_18120 svc_getnl 0 18120 NULL
105449 +paging32_gpte_to_gfn_lvl_18131 paging32_gpte_to_gfn_lvl 0-2-1 18131 NULL
105450 +vmw_surface_dma_size_18132 vmw_surface_dma_size 0 18132 NULL
105451 +selinux_inode_setsecurity_18148 selinux_inode_setsecurity 4 18148 NULL
105452 +is_idx_node_in_use_18165 is_idx_node_in_use 0 18165 NULL
105453 +pccard_store_cis_18176 pccard_store_cis 6 18176 NULL
105454 +snd_pcm_hw_refine_user_18204 snd_pcm_hw_refine_user 0 18204 NULL
105455 +orinoco_add_extscan_result_18207 orinoco_add_extscan_result 3 18207 NULL
105456 +gsm_control_message_18209 gsm_control_message 4 18209 NULL
105457 +ocfs2_divide_leaf_refcount_block_18214 ocfs2_divide_leaf_refcount_block 0 18214 NULL
105458 +do_ipv6_setsockopt_18215 do_ipv6_setsockopt 5 18215 NULL
105459 +gnttab_alloc_grant_references_18240 gnttab_alloc_grant_references 1 18240 NULL
105460 +alloc_trace_uprobe_18247 alloc_trace_uprobe 3 18247 NULL
105461 +rfcomm_sock_setsockopt_18254 rfcomm_sock_setsockopt 5 18254 NULL
105462 +__sysfs_add_one_18258 __sysfs_add_one 0 18258 NULL
105463 +qdisc_class_hash_alloc_18262 qdisc_class_hash_alloc 1 18262 NULL
105464 +gfs2_alloc_sort_buffer_18275 gfs2_alloc_sort_buffer 1 18275 NULL
105465 +alloc_ring_18278 alloc_ring 4-2-8 18278 NULL
105466 +find_dirty_idx_leb_18280 find_dirty_idx_leb 0 18280 NULL
105467 +nouveau_subdev_create__18281 nouveau_subdev_create_ 7 18281 NULL nohasharray
105468 +bio_phys_segments_18281 bio_phys_segments 0 18281 &nouveau_subdev_create__18281
105469 +ext4_readpages_18283 ext4_readpages 4 18283 NULL
105470 +mmc_send_bus_test_18285 mmc_send_bus_test 4 18285 NULL
105471 +um_idi_write_18293 um_idi_write 3 18293 NULL
105472 +nouveau_disp_create__18305 nouveau_disp_create_ 4-7 18305 NULL
105473 +vga_r_18310 vga_r 0 18310 NULL
105474 +class_add_profile_18315 class_add_profile 1-3-5 18315 NULL
105475 +csio_mem_read_18319 csio_mem_read 3 18319 NULL
105476 +alloc_and_copy_string_18321 alloc_and_copy_string 2 18321 NULL
105477 +ecryptfs_send_message_18322 ecryptfs_send_message 2 18322 NULL
105478 +bio_integrity_advance_18324 bio_integrity_advance 2 18324 NULL
105479 +__inorder_to_tree_18329 __inorder_to_tree 0-3-1 18329 NULL
105480 +lcd_proc_write_18351 lcd_proc_write 3 18351 NULL
105481 +pwr_power_save_off_read_18355 pwr_power_save_off_read 3 18355 NULL
105482 +xlbd_reserve_minors_18365 xlbd_reserve_minors 1-2 18365 NULL
105483 +SyS_process_vm_readv_18366 SyS_process_vm_readv 5-3 18366 NULL
105484 +ep_io_18367 ep_io 0 18367 NULL
105485 +qib_user_sdma_num_pages_18371 qib_user_sdma_num_pages 0 18371 NULL
105486 +ci_role_write_18388 ci_role_write 3 18388 NULL nohasharray
105487 +irq_find_mapping_18388 irq_find_mapping 0-2 18388 &ci_role_write_18388
105488 +btrfs_update_inode_18394 btrfs_update_inode 0 18394 NULL
105489 +hdlc_empty_fifo_18397 hdlc_empty_fifo 2 18397 NULL
105490 +__video_register_device_18399 __video_register_device 3 18399 NULL
105491 +hash_ip4_expire_18402 hash_ip4_expire 3 18402 NULL nohasharray
105492 +adis16136_show_serial_18402 adis16136_show_serial 3 18402 &hash_ip4_expire_18402
105493 +btrfs_rmap_block_18403 btrfs_rmap_block 0 18403 NULL
105494 +crystalhd_user_data_18407 crystalhd_user_data 3 18407 NULL
105495 +batadv_orig_node_add_if_18433 batadv_orig_node_add_if 2 18433 NULL nohasharray
105496 +iscsi_create_flashnode_sess_18433 iscsi_create_flashnode_sess 4 18433 &batadv_orig_node_add_if_18433
105497 +snd_hda_get_connections_18437 snd_hda_get_connections 0 18437 NULL
105498 +btrfs_uuid_tree_lookup_18451 btrfs_uuid_tree_lookup 0 18451 NULL
105499 +flash_dev_cache_miss_18454 flash_dev_cache_miss 4 18454 NULL
105500 +fuse_perform_write_18457 fuse_perform_write 4 18457 NULL
105501 +regset_tls_set_18459 regset_tls_set 4 18459 NULL
105502 +pcibios_window_alignment_18460 pcibios_window_alignment 0 18460 NULL
105503 +dma_alloc_from_contiguous_18466 dma_alloc_from_contiguous 3-2 18466 NULL
105504 +pci_vpd_lrdt_size_18479 pci_vpd_lrdt_size 0 18479 NULL nohasharray
105505 +mite_bytes_in_transit_18479 mite_bytes_in_transit 0 18479 &pci_vpd_lrdt_size_18479
105506 +r600_texture_size_18487 r600_texture_size 4-5-6 18487 NULL nohasharray
105507 +udpv6_setsockopt_18487 udpv6_setsockopt 5 18487 &r600_texture_size_18487
105508 +btrfs_fiemap_18501 btrfs_fiemap 3 18501 NULL nohasharray
105509 +remap_pmd_range_18501 remap_pmd_range 3-4-5 18501 &btrfs_fiemap_18501
105510 +i915_gem_object_wait_fence_18508 i915_gem_object_wait_fence 0 18508 NULL
105511 +__copy_user_zeroing_intel_18510 __copy_user_zeroing_intel 0-3 18510 NULL
105512 +snd_vx_inb_18514 snd_vx_inb 0 18514 NULL
105513 +snd_gus_dram_poke_18525 snd_gus_dram_poke 4 18525 NULL
105514 +nouveau_fifo_channel_create__18530 nouveau_fifo_channel_create_ 9 18530 NULL
105515 +seq_copy_in_user_18543 seq_copy_in_user 3 18543 NULL
105516 +acpi_register_gsi_ioapic_18550 acpi_register_gsi_ioapic 2 18550 NULL
105517 +sas_change_queue_depth_18555 sas_change_queue_depth 2 18555 NULL
105518 +vb2_streamon_18562 vb2_streamon 0 18562 NULL
105519 +smk_write_rules_list_18565 smk_write_rules_list 3 18565 NULL
105520 +debug_output_18575 debug_output 3 18575 NULL
105521 +is_extent_unchanged_18576 is_extent_unchanged 0 18576 NULL
105522 +check_lpt_type_18577 check_lpt_type 0 18577 NULL
105523 +numa_node_id_18596 numa_node_id 0 18596 NULL
105524 +filemap_fdatawait_range_18600 filemap_fdatawait_range 0 18600 NULL nohasharray
105525 +slabinfo_write_18600 slabinfo_write 3 18600 &filemap_fdatawait_range_18600
105526 +iowarrior_write_18604 iowarrior_write 3 18604 NULL
105527 +audio_get_endpoint_req_18624 audio_get_endpoint_req 0 18624 NULL
105528 +from_buffer_18625 from_buffer 3 18625 NULL
105529 +snd_pcm_oss_write3_18657 snd_pcm_oss_write3 0-3 18657 NULL
105530 +ieee80211_if_fmt_rssi_threshold_18664 ieee80211_if_fmt_rssi_threshold 3 18664 NULL
105531 +unmap_page_18665 unmap_page 2-3 18665 NULL
105532 +xfs_iext_insert_18667 xfs_iext_insert 3 18667 NULL
105533 +__alloc_skb_head_18683 __alloc_skb_head 2 18683 NULL
105534 +echo_client_prep_commit_18693 echo_client_prep_commit 8 18693 NULL
105535 +replay_log_leb_18704 replay_log_leb 3 18704 NULL
105536 +iwl_dbgfs_rx_handlers_read_18708 iwl_dbgfs_rx_handlers_read 3 18708 NULL
105537 +ceph_alloc_page_vector_18710 ceph_alloc_page_vector 1 18710 NULL
105538 +ocfs2_trim_extent_18711 ocfs2_trim_extent 3-4 18711 NULL
105539 +blk_rq_bytes_18715 blk_rq_bytes 0 18715 NULL
105540 +byt_gpio_to_irq_18721 byt_gpio_to_irq 2 18721 NULL
105541 +ext4_es_insert_extent_18729 ext4_es_insert_extent 0 18729 NULL
105542 +snd_als4k_gcr_read_addr_18741 snd_als4k_gcr_read_addr 0 18741 NULL
105543 +o2hb_debug_create_18744 o2hb_debug_create 4 18744 NULL
105544 +__erst_read_to_erange_from_nvram_18748 __erst_read_to_erange_from_nvram 0 18748 NULL
105545 +wep_packets_read_18751 wep_packets_read 3 18751 NULL
105546 +read_file_dump_nfcal_18766 read_file_dump_nfcal 3 18766 NULL
105547 +ffs_epfile_read_18775 ffs_epfile_read 3 18775 NULL
105548 +SyS_lsetxattr_18776 SyS_lsetxattr 4 18776 NULL
105549 +alloc_fcdev_18780 alloc_fcdev 1 18780 NULL
105550 +prealloc_18800 prealloc 0 18800 NULL
105551 +madvise_hwpoison_18812 madvise_hwpoison 2 18812 NULL
105552 +setup_ioapic_irq_18813 setup_ioapic_irq 1 18813 NULL
105553 +dm_stats_print_18815 dm_stats_print 7 18815 NULL
105554 +sys_modify_ldt_18824 sys_modify_ldt 3 18824 NULL
105555 +mtf_test_write_18844 mtf_test_write 3 18844 NULL
105556 +iterate_inode_refs_18846 iterate_inode_refs 0 18846 NULL
105557 +drm_ht_create_18853 drm_ht_create 2 18853 NULL
105558 +sctp_setsockopt_events_18862 sctp_setsockopt_events 3 18862 NULL
105559 +ieee80211_if_read_element_ttl_18869 ieee80211_if_read_element_ttl 3 18869 NULL nohasharray
105560 +heads_to_leaves_18869 heads_to_leaves 2-0 18869 &ieee80211_if_read_element_ttl_18869
105561 +xlog_find_verify_log_record_18870 xlog_find_verify_log_record 2 18870 NULL
105562 +width_to_agaw_18883 width_to_agaw 0-1 18883 NULL
105563 +overwrite_item_18896 overwrite_item 0 18896 NULL
105564 +kmem_cache_alloc_node_18899 kmem_cache_alloc_node 3 18899 NULL
105565 +ceph_setxattr_18913 ceph_setxattr 4 18913 NULL
105566 +ext4_block_in_group_18922 ext4_block_in_group 2 18922 NULL
105567 +ieee80211_rx_mgmt_disassoc_18927 ieee80211_rx_mgmt_disassoc 3 18927 NULL
105568 +create_pending_snapshot_18936 create_pending_snapshot 0 18936 NULL
105569 +snapshot_write_next_18937 snapshot_write_next 0 18937 NULL
105570 +regcache_sync_block_18963 regcache_sync_block 4-3 18963 NULL
105571 +__nla_reserve_18974 __nla_reserve 3 18974 NULL
105572 +__blockdev_direct_IO_18977 __blockdev_direct_IO 0-6 18977 NULL
105573 +gfn_to_pfn_atomic_18981 gfn_to_pfn_atomic 2 18981 NULL
105574 +get_inode_path_18988 get_inode_path 0 18988 NULL
105575 +find_dirtiest_idx_leb_19001 find_dirtiest_idx_leb 0 19001 NULL nohasharray
105576 +test_check_exists_19001 test_check_exists 2 19001 &find_dirtiest_idx_leb_19001
105577 +layout_in_gaps_19006 layout_in_gaps 2 19006 NULL
105578 +huge_page_size_19008 huge_page_size 0 19008 NULL
105579 +push_leaf_right_19017 push_leaf_right 0 19017 NULL
105580 +prepare_highmem_image_19028 prepare_highmem_image 0 19028 NULL
105581 +ocfs2_steal_resource_19036 ocfs2_steal_resource 0 19036 NULL
105582 +revalidate_19043 revalidate 2 19043 NULL
105583 +afs_vnode_store_data_19048 afs_vnode_store_data 2-3-4-5 19048 NULL
105584 +osc_pinger_recov_seq_write_19056 osc_pinger_recov_seq_write 3 19056 NULL
105585 +create_gpadl_header_19064 create_gpadl_header 2 19064 NULL
105586 +ieee80211_key_alloc_19065 ieee80211_key_alloc 3 19065 NULL
105587 +ceph_create_snap_context_19082 ceph_create_snap_context 1 19082 NULL
105588 +kvm_lapic_set_vapic_addr_19083 kvm_lapic_set_vapic_addr 2 19083 NULL
105589 +sta_last_seq_ctrl_read_19106 sta_last_seq_ctrl_read 3 19106 NULL
105590 +cifs_readv_from_socket_19109 cifs_readv_from_socket 3 19109 NULL
105591 +ATOMIC_SUB_RETURN_19115 ATOMIC_SUB_RETURN 2 19115 NULL
105592 +ext4_inode_table_19125 ext4_inode_table 0 19125 NULL
105593 +snd_als4k_iobase_readl_19136 snd_als4k_iobase_readl 0 19136 NULL
105594 +btrfs_run_delayed_refs_19137 btrfs_run_delayed_refs 0 19137 NULL
105595 +alloc_irdadev_19140 alloc_irdadev 1 19140 NULL
105596 +sleep_auth_read_19159 sleep_auth_read 3 19159 NULL
105597 +ext3_reserve_inode_write_19163 ext3_reserve_inode_write 0 19163 NULL
105598 +smk_write_access2_19170 smk_write_access2 3 19170 NULL
105599 +iwl_dbgfs_reply_tx_error_read_19205 iwl_dbgfs_reply_tx_error_read 3 19205 NULL
105600 +vmw_unlocked_ioctl_19212 vmw_unlocked_ioctl 2 19212 NULL
105601 +__copy_to_user_inatomic_19214 __copy_to_user_inatomic 3-0 19214 NULL
105602 +dev_counters_read_19216 dev_counters_read 3 19216 NULL
105603 +wbcir_tx_19219 wbcir_tx 3 19219 NULL
105604 +gsi_to_irq_19220 gsi_to_irq 0-1 19220 NULL
105605 +snd_mask_max_19224 snd_mask_max 0 19224 NULL
105606 +snd_pcm_capture_rewind_19229 snd_pcm_capture_rewind 0-2 19229 NULL
105607 +bio_alloc_mddev_19238 bio_alloc_mddev 2 19238 NULL
105608 +ucma_query_19260 ucma_query 4 19260 NULL
105609 +write_one_cache_group_19261 write_one_cache_group 0 19261 NULL
105610 +il_dbgfs_rxon_filter_flags_read_19281 il_dbgfs_rxon_filter_flags_read 3 19281 NULL
105611 +cfg80211_rx_unprot_mlme_mgmt_19288 cfg80211_rx_unprot_mlme_mgmt 3 19288 NULL
105612 +____cache_alloc_node_19297 ____cache_alloc_node 3 19297 NULL
105613 +qc_capture_19298 qc_capture 3 19298 NULL
105614 +ocfs2_prepare_inode_for_refcount_19303 ocfs2_prepare_inode_for_refcount 4-3 19303 NULL
105615 +event_tx_stuck_read_19305 event_tx_stuck_read 3 19305 NULL
105616 +gfn_to_gpa_19320 gfn_to_gpa 0-1 19320 NULL
105617 +debug_read_19322 debug_read 3 19322 NULL
105618 +SYSC_sendfile64_19327 SYSC_sendfile64 4 19327 NULL
105619 +cfg80211_inform_bss_19332 cfg80211_inform_bss 8 19332 NULL nohasharray
105620 +lbs_host_sleep_write_19332 lbs_host_sleep_write 3 19332 &cfg80211_inform_bss_19332
105621 +closure_sub_19359 closure_sub 2 19359 NULL
105622 +firmware_data_write_19360 firmware_data_write 6-5 19360 NULL
105623 +read_zero_19366 read_zero 3 19366 NULL
105624 +interpret_user_input_19393 interpret_user_input 2 19393 NULL
105625 +sync_fill_pt_info_19397 sync_fill_pt_info 0 19397 NULL
105626 +get_unaligned_be16_19400 get_unaligned_be16 0 19400 NULL
105627 +get_n_events_by_type_19401 get_n_events_by_type 0 19401 NULL
105628 +pep_recvmsg_19402 pep_recvmsg 4 19402 NULL
105629 +dvbdmx_write_19423 dvbdmx_write 3 19423 NULL
105630 +__phys_addr_19434 __phys_addr 0 19434 NULL
105631 +SyS_sched_getaffinity_19444 SyS_sched_getaffinity 2 19444 NULL
105632 +xfrm_alg_auth_len_19454 xfrm_alg_auth_len 0 19454 NULL
105633 +gnet_stats_copy_19458 gnet_stats_copy 4 19458 NULL
105634 +sky2_read16_19475 sky2_read16 0 19475 NULL
105635 +__read_status_pciv2_19492 __read_status_pciv2 0 19492 NULL
105636 +kstrtoll_from_user_19500 kstrtoll_from_user 2 19500 NULL
105637 +ext4_add_new_descs_19509 ext4_add_new_descs 3 19509 NULL
105638 +cfc_write_array_to_buffer_19529 cfc_write_array_to_buffer 3 19529 NULL nohasharray
105639 +apei_exec_pre_map_gars_19529 apei_exec_pre_map_gars 0 19529 &cfc_write_array_to_buffer_19529
105640 +nfc_llcp_build_tlv_19536 nfc_llcp_build_tlv 3 19536 NULL
105641 +howmany_64_19548 howmany_64 2 19548 NULL
105642 +btrfs_xattr_security_init_19553 btrfs_xattr_security_init 0 19553 NULL
105643 +gfn_to_index_19558 gfn_to_index 0-1-3-2 19558 NULL
105644 +kernel_read_19559 kernel_read 4-0 19559 NULL
105645 +ocfs2_control_message_19564 ocfs2_control_message 3 19564 NULL
105646 +ieee80211_if_read_tkip_mic_test_19565 ieee80211_if_read_tkip_mic_test 3 19565 NULL
105647 +nfsd_read_19568 nfsd_read 5 19568 NULL
105648 +cgroup_read_s64_19570 cgroup_read_s64 5 19570 NULL
105649 +bm_status_read_19583 bm_status_read 3 19583 NULL
105650 +batadv_tt_update_orig_19586 batadv_tt_update_orig 4 19586 NULL
105651 +load_xattr_datum_19594 load_xattr_datum 0 19594 NULL
105652 +__mei_cl_recv_19636 __mei_cl_recv 3 19636 NULL
105653 +usbvision_rvmalloc_19655 usbvision_rvmalloc 1 19655 NULL
105654 +LoadBitmap_19658 LoadBitmap 2 19658 NULL
105655 +i915_gem_object_bind_to_gtt_19682 i915_gem_object_bind_to_gtt 0 19682 NULL
105656 +bio_detain_19690 bio_detain 2 19690 NULL
105657 +btrfs_del_inode_extref_19692 btrfs_del_inode_extref 0 19692 NULL
105658 +mem_cgroup_swappiness_19718 mem_cgroup_swappiness 0 19718 NULL
105659 +btrfs_write_marked_extents_19720 btrfs_write_marked_extents 0 19720 NULL
105660 +read_reg_19723 read_reg 0 19723 NULL
105661 +wm8350_block_write_19727 wm8350_block_write 2-3 19727 NULL
105662 +memcpy_toiovecend_19736 memcpy_toiovecend 3-4-0 19736 NULL
105663 +snd_es1968_get_dma_ptr_19747 snd_es1968_get_dma_ptr 0 19747 NULL
105664 +p9_client_read_19750 p9_client_read 0-5 19750 NULL
105665 +pnpbios_proc_write_19758 pnpbios_proc_write 3 19758 NULL
105666 +ocfs2_readpages_19759 ocfs2_readpages 4 19759 NULL
105667 +jffs2_acl_from_medium_19762 jffs2_acl_from_medium 2 19762 NULL
105668 +readhscx_19769 readhscx 0 19769 NULL
105669 +ocfs2_read_group_descriptor_19771 ocfs2_read_group_descriptor 0 19771 NULL
105670 +__set_print_fmt_19776 __set_print_fmt 0 19776 NULL
105671 +saa7146_vmalloc_build_pgtable_19780 saa7146_vmalloc_build_pgtable 2 19780 NULL
105672 +irda_setsockopt_19824 irda_setsockopt 5 19824 NULL
105673 +pcpu_next_unpop_19831 pcpu_next_unpop 4 19831 NULL
105674 +vfs_getxattr_19832 vfs_getxattr 0 19832 NULL
105675 +security_context_to_sid_19839 security_context_to_sid 2 19839 NULL
105676 +crypt_alloc_buffer_19846 crypt_alloc_buffer 2 19846 NULL
105677 +cfg80211_mlme_register_mgmt_19852 cfg80211_mlme_register_mgmt 5 19852 NULL
105678 +__nla_put_19857 __nla_put 3 19857 NULL
105679 +mrp_request_join_19882 mrp_request_join 4 19882 NULL
105680 +blk_alloc_queue_node_19887 blk_alloc_queue_node 2 19887 NULL
105681 +aes_decrypt_interrupt_read_19910 aes_decrypt_interrupt_read 3 19910 NULL
105682 +ps_upsd_max_apturn_read_19918 ps_upsd_max_apturn_read 3 19918 NULL
105683 +mangle_name_19923 mangle_name 0 19923 NULL
105684 +cgroup_task_count_19930 cgroup_task_count 0 19930 NULL
105685 +guest_read_tsc_19931 guest_read_tsc 0 19931 NULL
105686 +iwl_dbgfs_rx_queue_read_19943 iwl_dbgfs_rx_queue_read 3 19943 NULL
105687 +cfg80211_rx_assoc_resp_19944 cfg80211_rx_assoc_resp 4 19944 NULL
105688 +alloc_elfnotes_buf_19974 alloc_elfnotes_buf 1 19974 NULL
105689 +get_jack_mode_name_19976 get_jack_mode_name 4 19976 NULL
105690 +attach_hdlc_protocol_19986 attach_hdlc_protocol 3 19986 NULL
105691 +rtw_set_wps_probe_resp_19989 rtw_set_wps_probe_resp 3 19989 NULL
105692 +diva_um_idi_read_20003 diva_um_idi_read 0 20003 NULL
105693 +lov_stripe_md_size_20009 lov_stripe_md_size 0-1 20009 NULL
105694 +SYSC_fgetxattr_20027 SYSC_fgetxattr 4 20027 NULL
105695 +split_scan_timeout_read_20029 split_scan_timeout_read 3 20029 NULL
105696 +mlx5_ib_db_map_user_20037 mlx5_ib_db_map_user 2 20037 NULL
105697 +btrfs_block_rsv_migrate_20050 btrfs_block_rsv_migrate 0 20050 NULL
105698 +__be32_to_cpup_20056 __be32_to_cpup 0 20056 NULL
105699 +alloc_ieee80211_20063 alloc_ieee80211 1 20063 NULL
105700 +iwl_mvm_power_mac_dbgfs_read_20067 iwl_mvm_power_mac_dbgfs_read 4 20067 NULL
105701 +btrfs_pin_extent_for_log_replay_20069 btrfs_pin_extent_for_log_replay 2 20069 NULL
105702 +target_message_20072 target_message 2 20072 NULL
105703 +rawv6_sendmsg_20080 rawv6_sendmsg 4 20080 NULL
105704 +fuse_conn_limit_read_20084 fuse_conn_limit_read 3 20084 NULL
105705 +aat2870_reg_write_file_20086 aat2870_reg_write_file 3 20086 NULL
105706 +team_options_register_20091 team_options_register 3 20091 NULL
105707 +qla2x00_adjust_sdev_qdepth_up_20097 qla2x00_adjust_sdev_qdepth_up 2 20097 NULL
105708 +read_int_20098 read_int 0 20098 NULL
105709 +root_nfs_copy_20111 root_nfs_copy 3 20111 NULL
105710 +hptiop_adjust_disk_queue_depth_20122 hptiop_adjust_disk_queue_depth 2 20122 NULL
105711 +self_check_vid_hdr_20131 self_check_vid_hdr 0 20131 NULL
105712 +tomoyo_commit_ok_20167 tomoyo_commit_ok 2 20167 NULL
105713 +read_flush_pipefs_20171 read_flush_pipefs 3 20171 NULL
105714 +wep_addr_key_count_read_20174 wep_addr_key_count_read 3 20174 NULL
105715 +create_trace_probe_20175 create_trace_probe 1 20175 NULL
105716 +crystalhd_map_dio_20181 crystalhd_map_dio 3 20181 NULL
105717 +udf_bitmap_new_block_20214 udf_bitmap_new_block 4 20214 NULL
105718 +pvr2_ctrl_value_to_sym_20229 pvr2_ctrl_value_to_sym 5 20229 NULL
105719 +rose_sendmsg_20249 rose_sendmsg 4 20249 NULL
105720 +tm6000_i2c_send_regs_20250 tm6000_i2c_send_regs 5 20250 NULL
105721 +pcpu_alloc_20255 pcpu_alloc 1-2 20255 NULL
105722 +resource_size_20256 resource_size 0 20256 NULL
105723 +uv_blade_to_memory_nid_20259 uv_blade_to_memory_nid 0 20259 NULL
105724 +r10_sync_page_io_20307 r10_sync_page_io 3 20307 NULL
105725 +dm_get_reserved_bio_based_ios_20315 dm_get_reserved_bio_based_ios 0 20315 NULL
105726 +tx_tx_burst_programmed_read_20320 tx_tx_burst_programmed_read 3 20320 NULL
105727 +vx_send_msg_nolock_20322 vx_send_msg_nolock 0 20322 NULL
105728 +snd_cs4281_BA1_read_20323 snd_cs4281_BA1_read 5 20323 NULL
105729 +ocfs2_et_insert_check_20341 ocfs2_et_insert_check 0 20341 NULL
105730 +gfs2_glock_nq_m_20347 gfs2_glock_nq_m 1 20347 NULL
105731 +handle_arr_calc_size_20355 handle_arr_calc_size 0-1 20355 NULL
105732 +snd_pcm_stop_20376 snd_pcm_stop 0 20376 NULL
105733 +smk_set_cipso_20379 smk_set_cipso 3 20379 NULL
105734 +block_read_full_page_20380 block_read_full_page 0 20380 NULL
105735 +snd_nm256_readl_20394 snd_nm256_readl 0 20394 NULL nohasharray
105736 +read_7220_creg32_20394 read_7220_creg32 0 20394 &snd_nm256_readl_20394
105737 +__kfifo_from_user_20399 __kfifo_from_user 3 20399 NULL nohasharray
105738 +SyS_get_mempolicy_20399 SyS_get_mempolicy 3-4 20399 &__kfifo_from_user_20399
105739 +btrfs_set_acl_20440 btrfs_set_acl 0 20440 NULL
105740 +nfs3_setxattr_20458 nfs3_setxattr 4 20458 NULL
105741 +compat_ipv6_setsockopt_20468 compat_ipv6_setsockopt 5 20468 NULL
105742 +read_buf_20469 read_buf 2 20469 NULL
105743 +btrfs_get_32_20476 btrfs_get_32 0 20476 NULL
105744 +fast_user_write_20494 fast_user_write 5 20494 NULL
105745 +ocfs2_db_frozen_trigger_20503 ocfs2_db_frozen_trigger 4 20503 NULL
105746 +pcpu_alloc_area_20511 pcpu_alloc_area 0-3 20511 NULL
105747 +link_free_space_20512 link_free_space 0 20512 NULL
105748 +pcpu_depopulate_chunk_20517 pcpu_depopulate_chunk 3-2 20517 NULL
105749 +xfs_iext_realloc_direct_20521 xfs_iext_realloc_direct 2 20521 NULL
105750 +drbd_bm_resize_20522 drbd_bm_resize 2 20522 NULL
105751 +amd_create_gatt_pages_20537 amd_create_gatt_pages 1 20537 NULL
105752 +scsi_report_opcode_20551 scsi_report_opcode 3 20551 NULL
105753 +venus_create_20555 venus_create 4 20555 NULL
105754 +btrfs_super_log_root_20565 btrfs_super_log_root 0 20565 NULL
105755 +crypto_ahash_reqsize_20569 crypto_ahash_reqsize 0 20569 NULL
105756 +batadv_tt_append_diff_20588 batadv_tt_append_diff 4 20588 NULL nohasharray
105757 +ocfs2_cluster_lock_20588 ocfs2_cluster_lock 0 20588 &batadv_tt_append_diff_20588
105758 +kvm_test_age_hva_20593 kvm_test_age_hva 2 20593 NULL
105759 +sync_timeline_create_20601 sync_timeline_create 2 20601 NULL
105760 +lirc_write_20604 lirc_write 3 20604 NULL
105761 +qib_qsfp_write_20614 qib_qsfp_write 0-2-4 20614 NULL
105762 +snd_pcm_oss_prepare_20641 snd_pcm_oss_prepare 0 20641 NULL
105763 +get_extent_skip_holes_20642 get_extent_skip_holes 2 20642 NULL
105764 +kfifo_copy_to_user_20646 kfifo_copy_to_user 4-3 20646 NULL
105765 +cpulist_scnprintf_20648 cpulist_scnprintf 2-0 20648 NULL
105766 +oz_add_farewell_20652 oz_add_farewell 5 20652 NULL
105767 +oz_cdev_read_20659 oz_cdev_read 3 20659 NULL
105768 +snd_hdsp_playback_copy_20676 snd_hdsp_playback_copy 5 20676 NULL nohasharray
105769 +btrfs_qgroup_reserve_20676 btrfs_qgroup_reserve 0 20676 &snd_hdsp_playback_copy_20676
105770 +get_user_page_nowait_20682 get_user_page_nowait 3 20682 NULL nohasharray
105771 +dvb_dmxdev_buffer_read_20682 dvb_dmxdev_buffer_read 0-4 20682 &get_user_page_nowait_20682
105772 +cpumask_size_20683 cpumask_size 0 20683 NULL
105773 +btrfs_node_blockptr_20685 btrfs_node_blockptr 0 20685 NULL
105774 +gru_vtop_20689 gru_vtop 2 20689 NULL
105775 +read_file_tgt_int_stats_20697 read_file_tgt_int_stats 3 20697 NULL
105776 +i915_gem_obj_ggtt_pin_20698 i915_gem_obj_ggtt_pin 0 20698 NULL
105777 +__maestro_read_20700 __maestro_read 0 20700 NULL
105778 +cipso_v4_gentag_rng_20703 cipso_v4_gentag_rng 0 20703 NULL
105779 +pcpu_page_first_chunk_20712 pcpu_page_first_chunk 1 20712 NULL
105780 +ocfs2_read_xattr_bucket_20722 ocfs2_read_xattr_bucket 0 20722 NULL
105781 +security_context_to_sid_force_20724 security_context_to_sid_force 2 20724 NULL
105782 +io_apic_set_pci_routing_20740 io_apic_set_pci_routing 2 20740 NULL
105783 +fb_prepare_logo_20743 fb_prepare_logo 0 20743 NULL
105784 +vol_cdev_direct_write_20751 vol_cdev_direct_write 3 20751 NULL
105785 +ocfs2_align_bytes_to_clusters_20754 ocfs2_align_bytes_to_clusters 2 20754 NULL
105786 +brcmf_p2p_escan_20763 brcmf_p2p_escan 2 20763 NULL
105787 +ubi_io_read_20767 ubi_io_read 0 20767 NULL
105788 +ext4_r_blocks_count_20768 ext4_r_blocks_count 0 20768 NULL
105789 +fb_alloc_cmap_gfp_20792 fb_alloc_cmap_gfp 2 20792 NULL
105790 +iommu_range_alloc_20794 iommu_range_alloc 3 20794 NULL
105791 +iwl_dbgfs_rxon_flags_read_20795 iwl_dbgfs_rxon_flags_read 3 20795 NULL
105792 +ext4_convert_unwritten_extents_endio_20812 ext4_convert_unwritten_extents_endio 0 20812 NULL
105793 +strndup_user_20819 strndup_user 2 20819 NULL
105794 +dtf_read_channel_20831 dtf_read_channel 3 20831 NULL
105795 +wl1271_format_buffer_20834 wl1271_format_buffer 2 20834 NULL
105796 +uvc_alloc_entity_20836 uvc_alloc_entity 3-4 20836 NULL
105797 +btrfs_orphan_add_20840 btrfs_orphan_add 0 20840 NULL
105798 +p9_tag_alloc_20845 p9_tag_alloc 3 20845 NULL
105799 +nvme_trans_supported_vpd_pages_20847 nvme_trans_supported_vpd_pages 4 20847 NULL
105800 +get_name_20855 get_name 4 20855 NULL
105801 +iwl_dbgfs_pm_params_read_20866 iwl_dbgfs_pm_params_read 3 20866 NULL
105802 +snd_pcm_capture_avail_20867 snd_pcm_capture_avail 0 20867 NULL
105803 +srq_free_res_20868 srq_free_res 5 20868 NULL
105804 +ocfs2_bmap_20874 ocfs2_bmap 2 20874 NULL
105805 +cfs_cpt_table_create_20884 cfs_cpt_table_create 1 20884 NULL
105806 +rb_simple_write_20890 rb_simple_write 3 20890 NULL
105807 +sisusb_send_packet_20891 sisusb_send_packet 2 20891 NULL
105808 +key_icverrors_read_20895 key_icverrors_read 3 20895 NULL
105809 +vfio_msi_enable_20906 vfio_msi_enable 2 20906 NULL
105810 +lbs_rdbbp_write_20918 lbs_rdbbp_write 3 20918 NULL
105811 +htable_bits_20933 htable_bits 0 20933 NULL
105812 +check_eofblocks_fl_20942 check_eofblocks_fl 0 20942 NULL
105813 +altera_set_ir_post_20948 altera_set_ir_post 2 20948 NULL
105814 +rx_rx_phy_hdr_read_20950 rx_rx_phy_hdr_read 3 20950 NULL
105815 +rsxx_cram_read_20957 rsxx_cram_read 3 20957 NULL
105816 +nfs_map_name_to_uid_20962 nfs_map_name_to_uid 3 20962 NULL
105817 +snd_rme9652_playback_copy_20970 snd_rme9652_playback_copy 5 20970 NULL
105818 +alg_setsockopt_20985 alg_setsockopt 5 20985 NULL
105819 +qib_verbs_send_20999 qib_verbs_send 5-3 20999 NULL
105820 +ocfs2_free_clusters_21001 ocfs2_free_clusters 4-0 21001 NULL
105821 +btrfs_inode_ref_name_len_21024 btrfs_inode_ref_name_len 0 21024 NULL
105822 +rx_defrag_tkip_called_read_21031 rx_defrag_tkip_called_read 3 21031 NULL
105823 +lbs_threshold_read_21046 lbs_threshold_read 5 21046 NULL
105824 +reiserfs_direct_IO_21051 reiserfs_direct_IO 4 21051 NULL
105825 +proc_fault_inject_write_21058 proc_fault_inject_write 3 21058 NULL
105826 +btrfs_insert_root_21061 btrfs_insert_root 0 21061 NULL
105827 +qdisc_get_default_21072 qdisc_get_default 2 21072 NULL
105828 +event_calibration_read_21083 event_calibration_read 3 21083 NULL
105829 +bl_add_page_to_bio_21094 bl_add_page_to_bio 2 21094 NULL nohasharray
105830 +multipath_status_21094 multipath_status 5 21094 &bl_add_page_to_bio_21094
105831 +ext2_valid_block_bitmap_21101 ext2_valid_block_bitmap 3 21101 NULL
105832 +ath6kl_send_go_probe_resp_21113 ath6kl_send_go_probe_resp 3 21113 NULL
105833 +bitset_size_in_bytes_21124 bitset_size_in_bytes 0-1 21124 NULL
105834 +i2400m_rx_trace_21127 i2400m_rx_trace 3 21127 NULL
105835 +tps6586x_irq_init_21144 tps6586x_irq_init 3 21144 NULL
105836 +chunk_size_21146 chunk_size 0 21146 NULL
105837 +ocfs2_block_check_validate_21149 ocfs2_block_check_validate 2 21149 NULL
105838 +alloc_pg_vec_21159 alloc_pg_vec 3 21159 NULL
105839 +btrfs_add_root_ref_21186 btrfs_add_root_ref 0 21186 NULL
105840 +cx18_v4l2_read_21196 cx18_v4l2_read 3 21196 NULL
105841 +get_current_ntfs_time_21198 get_current_ntfs_time 0 21198 NULL
105842 +ipc_rcu_alloc_21208 ipc_rcu_alloc 1 21208 NULL
105843 +scsi_execute_req_flags_21215 scsi_execute_req_flags 5 21215 NULL
105844 +_ocfs2_free_clusters_21220 _ocfs2_free_clusters 4-0 21220 NULL
105845 +get_numpages_21227 get_numpages 0-1-2 21227 NULL
105846 +SyS_mlock_21238 SyS_mlock 1-2 21238 NULL
105847 +input_ff_create_21240 input_ff_create 2 21240 NULL
105848 +cfg80211_notify_new_peer_candidate_21242 cfg80211_notify_new_peer_candidate 4 21242 NULL
105849 +fru_length_21257 fru_length 0 21257 NULL
105850 +rtw_set_wps_beacon_21262 rtw_set_wps_beacon 3 21262 NULL
105851 +ocfs2_blocks_for_bytes_21268 ocfs2_blocks_for_bytes 0-2 21268 NULL
105852 +vmw_gmr2_bind_21305 vmw_gmr2_bind 3 21305 NULL
105853 +do_msg_fill_21307 do_msg_fill 3 21307 NULL
105854 +add_res_range_21310 add_res_range 4 21310 NULL
105855 +get_zeroed_page_21322 get_zeroed_page 0 21322 NULL
105856 +ftrace_profile_read_21327 ftrace_profile_read 3 21327 NULL
105857 +gfs2_ea_get_copy_21353 gfs2_ea_get_copy 0 21353 NULL
105858 +max77693_irq_domain_map_21357 max77693_irq_domain_map 2 21357 NULL
105859 +alloc_orinocodev_21371 alloc_orinocodev 1 21371 NULL
105860 +split_leaf_21378 split_leaf 0 21378 NULL
105861 +SYSC_rt_sigpending_21379 SYSC_rt_sigpending 2 21379 NULL
105862 +video_ioctl2_21380 video_ioctl2 2 21380 NULL
105863 +diva_get_driver_dbg_mask_21399 diva_get_driver_dbg_mask 0 21399 NULL
105864 +sle64_to_cpu_21400 sle64_to_cpu 0-1 21400 NULL
105865 +snd_m3_inw_21406 snd_m3_inw 0 21406 NULL
105866 +snapshot_read_next_21426 snapshot_read_next 0 21426 NULL
105867 +tcp_bound_to_half_wnd_21429 tcp_bound_to_half_wnd 0-2 21429 NULL
105868 +tracing_saved_cmdlines_read_21434 tracing_saved_cmdlines_read 3 21434 NULL
105869 +aggr_size_tx_agg_vs_rate_read_21438 aggr_size_tx_agg_vs_rate_read 3 21438 NULL
105870 +__ertm_hdr_size_21450 __ertm_hdr_size 0 21450 NULL
105871 +ReadISAR_21453 ReadISAR 0 21453 NULL
105872 +mei_nfc_send_21477 mei_nfc_send 3 21477 NULL
105873 +read_file_xmit_21487 read_file_xmit 3 21487 NULL
105874 +mmc_alloc_sg_21504 mmc_alloc_sg 1 21504 NULL
105875 +dma_skb_copy_datagram_iovec_21516 dma_skb_copy_datagram_iovec 3-5 21516 NULL
105876 +btrfs_file_aio_write_21520 btrfs_file_aio_write 4 21520 NULL
105877 +il_dbgfs_stations_read_21532 il_dbgfs_stations_read 3 21532 NULL
105878 +cipso_v4_map_cat_enum_hton_21540 cipso_v4_map_cat_enum_hton 0 21540 NULL
105879 +rxrpc_send_data_21553 rxrpc_send_data 5 21553 NULL
105880 +rx_rx_beacon_early_term_read_21559 rx_rx_beacon_early_term_read 3 21559 NULL
105881 +xfs_buf_read_uncached_21585 xfs_buf_read_uncached 3 21585 NULL
105882 +snd_es18xx_mixer_read_21586 snd_es18xx_mixer_read 0 21586 NULL
105883 +ocfs2_acl_from_xattr_21604 ocfs2_acl_from_xattr 2 21604 NULL
105884 +filemap_get_page_21606 filemap_get_page 2 21606 NULL
105885 +xlog_do_recovery_pass_21618 xlog_do_recovery_pass 3 21618 NULL
105886 +pci_cardbus_resource_alignment_21625 pci_cardbus_resource_alignment 0 21625 NULL
105887 +add_pin_to_irq_node_21628 add_pin_to_irq_node 2 21628 NULL
105888 +ocfs2_refcount_cow_hunk_21630 ocfs2_refcount_cow_hunk 3-4 21630 NULL
105889 +__jfs_getxattr_21631 __jfs_getxattr 0 21631 NULL
105890 +validate_nnode_21638 validate_nnode 0 21638 NULL
105891 +__irq_alloc_descs_21639 __irq_alloc_descs 2-1-0-3-4 21639 NULL
105892 +ocfs2_lock_refcount_allocators_21646 ocfs2_lock_refcount_allocators 0 21646 NULL
105893 +atalk_sendmsg_21677 atalk_sendmsg 4 21677 NULL
105894 +ocfs2_xattr_get_nolock_21678 ocfs2_xattr_get_nolock 0 21678 NULL
105895 +rtllib_alloc_txb_21687 rtllib_alloc_txb 1 21687 NULL
105896 +evdev_ioctl_handler_21705 evdev_ioctl_handler 2 21705 NULL
105897 +btrfs_find_all_leafs_21712 btrfs_find_all_leafs 0 21712 NULL
105898 +unix_skb_len_21722 unix_skb_len 0 21722 NULL
105899 +lprocfs_wr_import_21728 lprocfs_wr_import 3 21728 NULL
105900 +ocfs2_remove_rightmost_path_21729 ocfs2_remove_rightmost_path 0 21729 NULL
105901 +reiserfs_allocate_list_bitmaps_21732 reiserfs_allocate_list_bitmaps 3 21732 NULL nohasharray
105902 +ext4_split_extent_at_21732 ext4_split_extent_at 0 21732 &reiserfs_allocate_list_bitmaps_21732
105903 +vm_brk_21739 vm_brk 1-2 21739 NULL
105904 +mthca_alloc_init_21754 mthca_alloc_init 2 21754 NULL
105905 +usbat_flash_read_data_21762 usbat_flash_read_data 4 21762 NULL
105906 +gen_pool_add_21776 gen_pool_add 3-4 21776 NULL
105907 +SyS_sendfile_21777 SyS_sendfile 4 21777 NULL
105908 +atomic64_cmpxchg_21782 atomic64_cmpxchg 0 21782 NULL
105909 +xfs_da_grow_inode_int_21785 xfs_da_grow_inode_int 3 21785 NULL
105910 +dvb_generic_ioctl_21810 dvb_generic_ioctl 2 21810 NULL
105911 +__ocfs2_cluster_lock_21812 __ocfs2_cluster_lock 0 21812 NULL
105912 +wm8994_request_irq_21822 wm8994_request_irq 2 21822 NULL
105913 +oom_adj_read_21847 oom_adj_read 3 21847 NULL
105914 +gen_unique_name_21852 gen_unique_name 0 21852 NULL
105915 +lpfc_idiag_extacc_avail_get_21865 lpfc_idiag_extacc_avail_get 0-3 21865 NULL
105916 +brcms_debugfs_hardware_read_21867 brcms_debugfs_hardware_read 3 21867 NULL
105917 +sisusbcon_bmove_21873 sisusbcon_bmove 6-5-7 21873 NULL
105918 +ldlm_lock_create_21888 ldlm_lock_create 7 21888 NULL
105919 +__alloc_reserved_percpu_21895 __alloc_reserved_percpu 2-1 21895 NULL
105920 +rio_destid_first_21900 rio_destid_first 0 21900 NULL
105921 +dbAllocCtl_21911 dbAllocCtl 0 21911 NULL
105922 +qsfp_1_read_21915 qsfp_1_read 3 21915 NULL
105923 +ast_ttm_tt_create_21917 ast_ttm_tt_create 2 21917 NULL
105924 +__ocfs2_claim_clusters_21936 __ocfs2_claim_clusters 0 21936 NULL
105925 +alloc_ldt_21972 alloc_ldt 2 21972 NULL
105926 +SYSC_prctl_21980 SYSC_prctl 4 21980 NULL
105927 +rxpipe_descr_host_int_trig_rx_data_read_22001 rxpipe_descr_host_int_trig_rx_data_read 3 22001 NULL nohasharray
105928 +compat_rw_copy_check_uvector_22001 compat_rw_copy_check_uvector 3-0 22001 &rxpipe_descr_host_int_trig_rx_data_read_22001
105929 +ocfs2_reserve_cluster_bitmap_bits_22016 ocfs2_reserve_cluster_bitmap_bits 0 22016 NULL
105930 +regcache_sync_block_raw_flush_22021 regcache_sync_block_raw_flush 3-4 22021 NULL
105931 +btrfs_get_16_22023 btrfs_get_16 0 22023 NULL
105932 +i915_gem_evict_something_22028 i915_gem_evict_something 0 22028 NULL
105933 +_sp2d_min_pg_22032 _sp2d_min_pg 0 22032 NULL
105934 +zd_usb_read_fw_22049 zd_usb_read_fw 4 22049 NULL
105935 +atalk_recvmsg_22053 atalk_recvmsg 4 22053 NULL
105936 +ieee80211_if_fmt_dropped_frames_ttl_22054 ieee80211_if_fmt_dropped_frames_ttl 3 22054 NULL
105937 +btrfs_reloc_clone_csums_22077 btrfs_reloc_clone_csums 2-3 22077 NULL
105938 +write_opcode_22082 write_opcode 2 22082 NULL
105939 +mem_rw_22085 mem_rw 3 22085 NULL
105940 +snd_pcm_xrun_22088 snd_pcm_xrun 0 22088 NULL
105941 +sched_clock_cpu_22098 sched_clock_cpu 0 22098 NULL
105942 +rt2x00debug_read_crypto_stats_22109 rt2x00debug_read_crypto_stats 3 22109 NULL
105943 +shmem_add_to_page_cache_22121 shmem_add_to_page_cache 0 22121 NULL
105944 +snd_hda_codec_read_22130 snd_hda_codec_read 0 22130 NULL
105945 +SyS_sched_setaffinity_22148 SyS_sched_setaffinity 2 22148 NULL
105946 +do_tcp_sendpages_22155 do_tcp_sendpages 4 22155 NULL
105947 +remap_pud_range_22164 remap_pud_range 3-4-5 22164 NULL
105948 +__kfifo_alloc_22173 __kfifo_alloc 3 22173 NULL nohasharray
105949 +vmci_qp_broker_map_22173 vmci_qp_broker_map 3 22173 &__kfifo_alloc_22173
105950 +fls_22210 fls 0 22210 NULL
105951 +rfcomm_sock_recvmsg_22227 rfcomm_sock_recvmsg 4 22227 NULL nohasharray
105952 +bitmap_clear_bits_22227 bitmap_clear_bits 3 22227 &rfcomm_sock_recvmsg_22227
105953 +mem_write_22232 mem_write 3 22232 NULL
105954 +p9_virtio_zc_request_22240 p9_virtio_zc_request 6-5 22240 NULL
105955 +fsnotify_parent_22243 fsnotify_parent 0 22243 NULL
105956 +compat_process_vm_rw_22254 compat_process_vm_rw 3-5 22254 NULL
105957 +ping_common_sendmsg_22261 ping_common_sendmsg 5 22261 NULL
105958 +add_res_tree_22263 add_res_tree 7 22263 NULL
105959 +__btrfs_direct_write_22273 __btrfs_direct_write 4 22273 NULL
105960 +queue_max_sectors_22280 queue_max_sectors 0 22280 NULL
105961 +i915_gem_execbuffer_relocate_22294 i915_gem_execbuffer_relocate 0 22294 NULL
105962 +pci_vpd_srdt_size_22300 pci_vpd_srdt_size 0 22300 NULL nohasharray
105963 +__tun_chr_ioctl_22300 __tun_chr_ioctl 4 22300 &pci_vpd_srdt_size_22300
105964 +mesh_table_alloc_22305 mesh_table_alloc 1 22305 NULL
105965 +lov_setstripe_22307 lov_setstripe 2 22307 NULL
105966 +udpv6_sendmsg_22316 udpv6_sendmsg 4 22316 NULL
105967 +C_SYSC_msgrcv_22320 C_SYSC_msgrcv 3 22320 NULL
105968 +radix_tree_find_next_bit_22334 radix_tree_find_next_bit 3-2-0 22334 NULL
105969 +atomic_read_22342 atomic_read 0 22342 NULL
105970 +ll_lazystatfs_seq_write_22353 ll_lazystatfs_seq_write 3 22353 NULL
105971 +mlx4_db_alloc_22358 mlx4_db_alloc 3 22358 NULL
105972 +irq_reserve_irq_22360 irq_reserve_irq 1 22360 NULL nohasharray
105973 +memcg_size_22360 memcg_size 0 22360 &irq_reserve_irq_22360
105974 +snd_pcm_alsa_frames_22363 snd_pcm_alsa_frames 2 22363 NULL
105975 +tps6586x_gpio_to_irq_22365 tps6586x_gpio_to_irq 2 22365 NULL
105976 +evdev_ioctl_22371 evdev_ioctl 2 22371 NULL
105977 +alloc_large_system_hash_22391 alloc_large_system_hash 2-8-9 22391 NULL
105978 +ocfs2_assign_bh_22392 ocfs2_assign_bh 0 22392 NULL
105979 +zoran_write_22404 zoran_write 3 22404 NULL
105980 +ATOMIC_ADD_RETURN_22413 ATOMIC_ADD_RETURN 2 22413 NULL
105981 +queue_reply_22416 queue_reply 3 22416 NULL
105982 +__set_enter_print_fmt_22431 __set_enter_print_fmt 0 22431 NULL
105983 +queue_max_segments_22441 queue_max_segments 0 22441 NULL
105984 +_rtw_vmalloc_22444 _rtw_vmalloc 1 22444 NULL
105985 +handle_received_packet_22457 handle_received_packet 3 22457 NULL
105986 +mem_cgroup_read_22461 mem_cgroup_read 5 22461 NULL
105987 +btrfs_write_out_cache_22471 btrfs_write_out_cache 0 22471 NULL nohasharray
105988 +dtf_write_device_22471 dtf_write_device 3 22471 &btrfs_write_out_cache_22471
105989 +source_sink_start_ep_22472 source_sink_start_ep 0 22472 NULL
105990 +alloc_desc_22487 alloc_desc 2 22487 NULL
105991 +ecryptfs_write_22488 ecryptfs_write 4-3 22488 NULL
105992 +qib_user_sdma_alloc_header_22490 qib_user_sdma_alloc_header 2 22490 NULL
105993 +cache_write_procfs_22491 cache_write_procfs 3 22491 NULL
105994 +mp_find_ioapic_pin_22499 mp_find_ioapic_pin 0-2 22499 NULL
105995 +mutex_lock_interruptible_22505 mutex_lock_interruptible 0 22505 NULL
105996 +__btrfs_add_delayed_item_22511 __btrfs_add_delayed_item 0 22511 NULL
105997 +trim_no_bitmap_22524 trim_no_bitmap 4-3 22524 NULL
105998 +ocfs2_read_extent_block_22550 ocfs2_read_extent_block 0 22550 NULL
105999 +agp_alloc_page_array_22554 agp_alloc_page_array 1 22554 NULL
106000 +snd_pcm_hw_params_choose_22560 snd_pcm_hw_params_choose 0 22560 NULL
106001 +dbFindCtl_22587 dbFindCtl 0 22587 NULL
106002 +snapshot_read_22601 snapshot_read 3 22601 NULL
106003 +ocfs2_get_refcount_block_22610 ocfs2_get_refcount_block 0 22610 NULL
106004 +remove_breakpoint_22628 remove_breakpoint 3 22628 NULL
106005 +sctp_setsockopt_connectx_old_22631 sctp_setsockopt_connectx_old 3 22631 NULL
106006 +ide_core_cp_entry_22636 ide_core_cp_entry 3 22636 NULL
106007 +wl1271_rx_filter_get_fields_size_22638 wl1271_rx_filter_get_fields_size 0 22638 NULL
106008 +pwr_wake_on_timer_exp_read_22640 pwr_wake_on_timer_exp_read 3 22640 NULL
106009 +sysfs_attr_ns_22645 sysfs_attr_ns 0 22645 NULL
106010 +iwl_dbgfs_calib_disabled_read_22649 iwl_dbgfs_calib_disabled_read 3 22649 NULL
106011 +compat_SyS_msgrcv_22661 compat_SyS_msgrcv 3 22661 NULL
106012 +ubifs_leb_write_22679 ubifs_leb_write 4-5 22679 NULL nohasharray
106013 +ext4_ext_direct_IO_22679 ext4_ext_direct_IO 4 22679 &ubifs_leb_write_22679
106014 +fill_gap_22681 fill_gap 0 22681 NULL nohasharray
106015 +l2tp_ip_recvmsg_22681 l2tp_ip_recvmsg 4 22681 &fill_gap_22681
106016 +bch_dump_read_22685 bch_dump_read 3 22685 NULL
106017 +reg_umr_22686 reg_umr 5 22686 NULL
106018 +ocfs2_get_block_22687 ocfs2_get_block 2 22687 NULL
106019 +map_22700 map 2 22700 NULL nohasharray
106020 +btrfs_del_inode_ref_in_log_22700 btrfs_del_inode_ref_in_log 0 22700 &map_22700
106021 +alloc_libipw_22708 alloc_libipw 1 22708 NULL
106022 +ecryptfs_write_metadata_to_contents_22721 ecryptfs_write_metadata_to_contents 3 22721 NULL
106023 +cx18_copy_buf_to_user_22735 cx18_copy_buf_to_user 4-0 22735 NULL
106024 +ceph_decode_32_22738 ceph_decode_32 0 22738 NULL nohasharray
106025 +__mei_cl_send_22738 __mei_cl_send 3 22738 &ceph_decode_32_22738
106026 +iio_debugfs_write_reg_22742 iio_debugfs_write_reg 3 22742 NULL
106027 +qlcnic_sriov_init_22762 qlcnic_sriov_init 2 22762 NULL
106028 +print_frame_22769 print_frame 0 22769 NULL
106029 +ftrace_arch_read_dyn_info_22773 ftrace_arch_read_dyn_info 0 22773 NULL
106030 +ocfs2_block_group_alloc_22774 ocfs2_block_group_alloc 0 22774 NULL
106031 +pla_ocp_write_22802 pla_ocp_write 4 22802 NULL
106032 +__generic_copy_to_user_intel_22806 __generic_copy_to_user_intel 0-3 22806 NULL
106033 +clone_bio_integrity_22842 clone_bio_integrity 4 22842 NULL
106034 +read_file_rcstat_22854 read_file_rcstat 3 22854 NULL
106035 +create_attr_set_22861 create_attr_set 1 22861 NULL
106036 +vmw_execbuf_process_22885 vmw_execbuf_process 5 22885 NULL
106037 +usblp_new_writeurb_22894 usblp_new_writeurb 2 22894 NULL
106038 +mdc800_device_read_22896 mdc800_device_read 3 22896 NULL
106039 +policy_emit_config_values_22900 policy_emit_config_values 3 22900 NULL
106040 +__btrfs_add_free_space_22917 __btrfs_add_free_space 0 22917 NULL
106041 +xstateregs_set_22932 xstateregs_set 4 22932 NULL
106042 +pcpu_mem_zalloc_22948 pcpu_mem_zalloc 1 22948 NULL
106043 +alloc_sglist_22960 alloc_sglist 3-2 22960 NULL
106044 +caif_seqpkt_sendmsg_22961 caif_seqpkt_sendmsg 4 22961 NULL
106045 +vme_get_size_22964 vme_get_size 0 22964 NULL
106046 +tx_frag_key_not_found_read_22971 tx_frag_key_not_found_read 3 22971 NULL
106047 +cached_dev_cache_miss_22979 cached_dev_cache_miss 4 22979 NULL
106048 +usb_get_langid_22983 usb_get_langid 0 22983 NULL
106049 +set_msr_hyperv_22985 set_msr_hyperv 3 22985 NULL
106050 +remote_settings_file_write_22987 remote_settings_file_write 3 22987 NULL
106051 +brcmf_sdio_chip_exit_download_23001 brcmf_sdio_chip_exit_download 4 23001 NULL
106052 +viafb_dvp0_proc_write_23023 viafb_dvp0_proc_write 3 23023 NULL
106053 +cifs_local_to_utf16_bytes_23025 cifs_local_to_utf16_bytes 0 23025 NULL
106054 +ocfs2_refcount_cow_xattr_23029 ocfs2_refcount_cow_xattr 0-6-7 23029 NULL
106055 +st_status_23032 st_status 5 23032 NULL
106056 +nv50_disp_chan_create__23056 nv50_disp_chan_create_ 5 23056 NULL
106057 +comedi_buf_write_n_available_23057 comedi_buf_write_n_available 0 23057 NULL
106058 +reiserfs_add_entry_23062 reiserfs_add_entry 4 23062 NULL nohasharray
106059 +unix_seqpacket_recvmsg_23062 unix_seqpacket_recvmsg 4 23062 &reiserfs_add_entry_23062
106060 +mei_cl_send_23068 mei_cl_send 3 23068 NULL
106061 +filp_close_23071 filp_close 0 23071 NULL
106062 +kvm_mmu_gva_to_gpa_write_23075 kvm_mmu_gva_to_gpa_write 0 23075 NULL
106063 +vm_map_ram_23078 vm_map_ram 3 23078 NULL nohasharray
106064 +raw_sendmsg_23078 raw_sendmsg 4 23078 &vm_map_ram_23078
106065 +get_user_hdr_len_23079 get_user_hdr_len 0 23079 NULL
106066 +isr_tx_procs_read_23084 isr_tx_procs_read 3 23084 NULL
106067 +lnw_gpio_irq_map_23087 lnw_gpio_irq_map 2 23087 NULL
106068 +rt2x00debug_write_eeprom_23091 rt2x00debug_write_eeprom 3 23091 NULL
106069 +__add_prelim_ref_23095 __add_prelim_ref 0 23095 NULL
106070 +fls_long_23096 fls_long 0 23096 NULL
106071 +ntfs_ucstonls_23097 ntfs_ucstonls 5-3 23097 NULL
106072 +pipe_iov_copy_from_user_23102 pipe_iov_copy_from_user 3 23102 NULL
106073 +dgram_recvmsg_23104 dgram_recvmsg 4 23104 NULL
106074 +ip_recv_error_23109 ip_recv_error 3 23109 NULL
106075 +mwl8k_cmd_set_beacon_23110 mwl8k_cmd_set_beacon 4 23110 NULL
106076 +nl80211_send_rx_auth_23111 nl80211_send_rx_auth 4 23111 NULL
106077 +__clear_user_23118 __clear_user 0-2 23118 NULL
106078 +drm_mode_create_tv_properties_23122 drm_mode_create_tv_properties 2 23122 NULL
106079 +ata_scsi_change_queue_depth_23126 ata_scsi_change_queue_depth 2 23126 NULL
106080 +ext3_listxattr_23137 ext3_listxattr 3 23137 NULL
106081 +read_file_ani_23161 read_file_ani 3 23161 NULL
106082 +tg_get_cfs_quota_23176 tg_get_cfs_quota 0 23176 NULL
106083 +usblp_write_23178 usblp_write 3 23178 NULL
106084 +msnd_fifo_alloc_23179 msnd_fifo_alloc 2 23179 NULL
106085 +gss_pipe_downcall_23182 gss_pipe_downcall 3 23182 NULL
106086 +fix_unclean_leb_23188 fix_unclean_leb 3 23188 NULL
106087 +mpi_alloc_limb_space_23190 mpi_alloc_limb_space 1 23190 NULL
106088 +convert_ip_to_linear_23198 convert_ip_to_linear 0 23198 NULL
106089 +pm80x_free_irq_23210 pm80x_free_irq 2 23210 NULL
106090 +tty_buffer_request_room_23228 tty_buffer_request_room 2-0 23228 NULL
106091 +__read_status_pci_23229 __read_status_pci 0 23229 NULL nohasharray
106092 +xlog_get_bp_23229 xlog_get_bp 2 23229 &__read_status_pci_23229
106093 +ft1000_read_dpram_mag_32_23232 ft1000_read_dpram_mag_32 0 23232 NULL
106094 +rxrpc_client_sendmsg_23236 rxrpc_client_sendmsg 5 23236 NULL
106095 +__gfn_to_rmap_23240 __gfn_to_rmap 2-1 23240 NULL
106096 +sctp_recvmsg_23265 sctp_recvmsg 4 23265 NULL
106097 +uwb_dev_addr_print_23282 uwb_dev_addr_print 2 23282 NULL
106098 +diva_get_trace_filter_23286 diva_get_trace_filter 0 23286 NULL
106099 +gru_preload_tlb_23305 gru_preload_tlb 7-4 23305 NULL
106100 +i2cdev_write_23310 i2cdev_write 3 23310 NULL
106101 +__aa_kvmalloc_23320 __aa_kvmalloc 1 23320 NULL
106102 +__es_remove_extent_23323 __es_remove_extent 0 23323 NULL
106103 +page_readlink_23346 page_readlink 3 23346 NULL
106104 +kmem_zalloc_large_23351 kmem_zalloc_large 1 23351 NULL
106105 +get_dst_timing_23358 get_dst_timing 0 23358 NULL
106106 +fd_setup_write_same_buf_23369 fd_setup_write_same_buf 3 23369 NULL
106107 +iscsi_change_queue_depth_23416 iscsi_change_queue_depth 2 23416 NULL
106108 +vga_mm_r_23419 vga_mm_r 0 23419 NULL
106109 +vzalloc_node_23424 vzalloc_node 1-2 23424 NULL
106110 +__ctzsi2_23444 __ctzsi2 1 23444 NULL
106111 +ocfs2_zero_tail_23447 ocfs2_zero_tail 3 23447 NULL
106112 +hidraw_send_report_23449 hidraw_send_report 3 23449 NULL
106113 +__ata_change_queue_depth_23484 __ata_change_queue_depth 3-0 23484 NULL
106114 +linear_conf_23485 linear_conf 2 23485 NULL
106115 +copy_module_from_user_23492 copy_module_from_user 2 23492 NULL
106116 +event_filter_read_23494 event_filter_read 3 23494 NULL
106117 +ext4_remove_blocks_23497 ext4_remove_blocks 0 23497 NULL
106118 +lustre_acl_xattr_merge2ext_23502 lustre_acl_xattr_merge2ext 2 23502 NULL
106119 +__gfn_to_hva_many_23508 __gfn_to_hva_many 0-2 23508 NULL
106120 +devm_iio_device_alloc_23511 devm_iio_device_alloc 2 23511 NULL
106121 +__proc_cpt_table_23516 __proc_cpt_table 5 23516 NULL
106122 +ima_show_measurements_count_23536 ima_show_measurements_count 3 23536 NULL
106123 +__es_insert_extent_23543 __es_insert_extent 0 23543 NULL
106124 +xen_allocate_irq_gsi_23546 xen_allocate_irq_gsi 1-0 23546 NULL
106125 +tcp_current_mss_23552 tcp_current_mss 0 23552 NULL
106126 +dbg_leb_change_23555 dbg_leb_change 0-4 23555 NULL
106127 +btrfs_super_bytenr_23561 btrfs_super_bytenr 0 23561 NULL
106128 +venus_symlink_23570 venus_symlink 6-4 23570 NULL
106129 +iwl_dbgfs_interrupt_read_23574 iwl_dbgfs_interrupt_read 3 23574 NULL
106130 +xfpregs_get_23586 xfpregs_get 4 23586 NULL
106131 +snd_interval_min_23590 snd_interval_min 0 23590 NULL
106132 +do_mmap_pgoff_23600 do_mmap_pgoff 0 23600 NULL
106133 +islpci_mgt_transaction_23610 islpci_mgt_transaction 5 23610 NULL
106134 +ocfs2_journal_access_23616 ocfs2_journal_access 0 23616 NULL
106135 +alloc_pte_23642 alloc_pte 3 23642 NULL
106136 +__i2400mu_send_barker_23652 __i2400mu_send_barker 3 23652 NULL
106137 +sInW_23663 sInW 0 23663 NULL
106138 +SyS_connect_23669 SyS_connect 3 23669 NULL
106139 +cx18_read_23699 cx18_read 3 23699 NULL
106140 +btrfs_insert_delayed_item_23705 btrfs_insert_delayed_item 0 23705 NULL
106141 +at_get_23708 at_get 0 23708 NULL
106142 +snd_pcm_hw_refine_23721 snd_pcm_hw_refine 0 23721 NULL
106143 +mp_config_acpi_gsi_23728 mp_config_acpi_gsi 2 23728 NULL
106144 +rx_rx_dropped_frame_read_23748 rx_rx_dropped_frame_read 3 23748 NULL
106145 +__kfifo_max_r_23768 __kfifo_max_r 0-2-1 23768 NULL
106146 +svc_prepare_thread_23772 svc_prepare_thread 3 23772 NULL
106147 +__build_packet_message_23778 __build_packet_message 4-10 23778 NULL
106148 +security_inode_getxattr_23781 security_inode_getxattr 0 23781 NULL
106149 +cfg80211_inform_bss_width_frame_23782 cfg80211_inform_bss_width_frame 5 23782 NULL
106150 +mpt_free_res_23793 mpt_free_res 5 23793 NULL
106151 +map_write_23795 map_write 3 23795 NULL
106152 +diva_alloc_dma_map_23798 diva_alloc_dma_map 2 23798 NULL
106153 +rx_path_reset_read_23801 rx_path_reset_read 3 23801 NULL
106154 +ocfs2_replace_cow_23803 ocfs2_replace_cow 0 23803 NULL
106155 +__earlyonly_bootmem_alloc_23824 __earlyonly_bootmem_alloc 2 23824 NULL
106156 +lustre_msg_buflen_23827 lustre_msg_buflen 0 23827 NULL
106157 +ceph_copy_page_vector_to_user_23829 ceph_copy_page_vector_to_user 0-4-3 23829 NULL
106158 +tfrc_binsearch_23833 tfrc_binsearch 0 23833 NULL
106159 +pgdat_end_pfn_23842 pgdat_end_pfn 0 23842 NULL
106160 +iwl_dbgfs_nvm_read_23845 iwl_dbgfs_nvm_read 3 23845 NULL
106161 +p54_init_common_23850 p54_init_common 1 23850 NULL
106162 +gart_alloc_coherent_23852 gart_alloc_coherent 2 23852 NULL
106163 +bin_to_hex_dup_23853 bin_to_hex_dup 2 23853 NULL
106164 +ocfs2_xattr_get_clusters_23857 ocfs2_xattr_get_clusters 0 23857 NULL
106165 +ieee80211_if_read_dot11MeshMaxPeerLinks_23878 ieee80211_if_read_dot11MeshMaxPeerLinks 3 23878 NULL
106166 +nes_alloc_resource_23891 nes_alloc_resource 3 23891 NULL
106167 +_rtw_zvmalloc_23892 _rtw_zvmalloc 1 23892 NULL
106168 +tipc_snprintf_23893 tipc_snprintf 2-0 23893 NULL
106169 +usbg_prepare_w_request_23895 usbg_prepare_w_request 0 23895 NULL
106170 +add_new_gdb_meta_bg_23911 add_new_gdb_meta_bg 3 23911 NULL nohasharray
106171 +ieee80211_if_read_hw_queues_23911 ieee80211_if_read_hw_queues 3 23911 &add_new_gdb_meta_bg_23911
106172 +device_create_bin_file_23914 device_create_bin_file 0 23914 NULL
106173 +f2fs_getxattr_23917 f2fs_getxattr 0 23917 NULL
106174 +ipath_reg_phys_mr_23918 ipath_reg_phys_mr 3 23918 NULL nohasharray
106175 +mpihelp_mul_karatsuba_case_23918 mpihelp_mul_karatsuba_case 5-3 23918 &ipath_reg_phys_mr_23918
106176 +kvm_read_guest_23928 kvm_read_guest 4-2 23928 NULL nohasharray
106177 +intel_ring_alloc_seqno_23928 intel_ring_alloc_seqno 0 23928 &kvm_read_guest_23928
106178 +__alloc_skb_23940 __alloc_skb 4 23940 NULL
106179 +uvc_endpoint_max_bpi_23944 uvc_endpoint_max_bpi 0 23944 NULL
106180 +cifs_setxattr_23957 cifs_setxattr 4 23957 NULL
106181 +size_roundup_power2_23958 size_roundup_power2 0-1 23958 NULL
106182 +vfs_write_23971 vfs_write 0-3 23971 NULL
106183 +sddr55_write_data_23983 sddr55_write_data 4 23983 NULL
106184 +zd_usb_iowrite16v_async_23984 zd_usb_iowrite16v_async 3 23984 NULL
106185 +cxgb_alloc_mem_24007 cxgb_alloc_mem 1 24007 NULL
106186 +intel_ring_idle_24014 intel_ring_idle 0 24014 NULL
106187 +give_pages_24021 give_pages 3 24021 NULL
106188 +ocfs2_mark_extent_refcounted_24035 ocfs2_mark_extent_refcounted 6 24035 NULL
106189 +adis16400_show_serial_number_24037 adis16400_show_serial_number 3 24037 NULL
106190 +hmac_setkey_24043 hmac_setkey 3 24043 NULL
106191 +afs_cell_alloc_24052 afs_cell_alloc 2 24052 NULL
106192 +blkcipher_copy_iv_24075 blkcipher_copy_iv 3 24075 NULL
106193 +vb2_fop_read_24080 vb2_fop_read 3 24080 NULL
106194 +ocfs2_cannibalize_resv_24090 ocfs2_cannibalize_resv 3 24090 NULL
106195 +pipeline_post_proc_swi_read_24108 pipeline_post_proc_swi_read 3 24108 NULL
106196 +request_key_auth_read_24109 request_key_auth_read 3 24109 NULL
106197 +lov_brw_24122 lov_brw 4 24122 NULL
106198 +mpu401_read_24126 mpu401_read 3-0 24126 NULL
106199 +_picolcd_flash_write_24134 _picolcd_flash_write 4 24134 NULL
106200 +irnet_ctrl_write_24139 irnet_ctrl_write 3 24139 NULL
106201 +SyS_sethostname_24150 SyS_sethostname 2 24150 NULL
106202 +trim_bitmaps_24158 trim_bitmaps 3 24158 NULL
106203 +set_discard_24162 set_discard 2 24162 NULL
106204 +__copy_user_zeroing_intel_nocache_24163 __copy_user_zeroing_intel_nocache 0-3 24163 NULL
106205 +adu_read_24177 adu_read 3 24177 NULL
106206 +safe_prepare_write_buffer_24187 safe_prepare_write_buffer 3 24187 NULL
106207 +shrink_tnc_24190 shrink_tnc 0 24190 NULL
106208 +irq_remapping_setup_msi_irqs_24194 irq_remapping_setup_msi_irqs 2 24194 NULL
106209 +ieee80211_if_read_dot11MeshHWMPpreqMinInterval_24208 ieee80211_if_read_dot11MeshHWMPpreqMinInterval 3 24208 NULL
106210 +efx_vf_size_24213 efx_vf_size 0 24213 NULL
106211 +SyS_sendfile64_24220 SyS_sendfile64 4 24220 NULL
106212 +tcpprobe_sprint_24222 tcpprobe_sprint 0-2 24222 NULL
106213 +pcpu_embed_first_chunk_24224 pcpu_embed_first_chunk 3-2-1 24224 NULL nohasharray
106214 +mei_amthif_read_24224 mei_amthif_read 4 24224 &pcpu_embed_first_chunk_24224
106215 +pci_num_vf_24235 pci_num_vf 0 24235 NULL
106216 +sel_read_bool_24236 sel_read_bool 3 24236 NULL
106217 +dm_cache_save_hint_24257 dm_cache_save_hint 2 24257 NULL
106218 +em28xx_alloc_urbs_24260 em28xx_alloc_urbs 4-6 24260 NULL
106219 +calculate_sizes_24273 calculate_sizes 2 24273 NULL
106220 +thin_status_24278 thin_status 5 24278 NULL
106221 +msg_size_24288 msg_size 0 24288 NULL
106222 +ext2_free_blocks_24292 ext2_free_blocks 3-2 24292 NULL
106223 +i2c_smbus_check_pec_24297 i2c_smbus_check_pec 0 24297 NULL
106224 +map_page_24298 map_page 3-4 24298 NULL
106225 +gserial_connect_24302 gserial_connect 0 24302 NULL
106226 +btmrvl_pscmd_read_24308 btmrvl_pscmd_read 3 24308 NULL
106227 +reserve_metadata_bytes_24313 reserve_metadata_bytes 3-0 24313 NULL
106228 +ath6kl_add_bss_if_needed_24317 ath6kl_add_bss_if_needed 6 24317 NULL
106229 +ocfs2_direct_IO_get_blocks_24333 ocfs2_direct_IO_get_blocks 2 24333 NULL
106230 +si476x_radio_read_acf_blob_24336 si476x_radio_read_acf_blob 3 24336 NULL
106231 +kzalloc_node_24352 kzalloc_node 1-3 24352 NULL
106232 +qla2x00_handle_queue_full_24365 qla2x00_handle_queue_full 2 24365 NULL
106233 +cfi_read_pri_24366 cfi_read_pri 3 24366 NULL
106234 +btrfs_item_size_nr_24367 btrfs_item_size_nr 0 24367 NULL
106235 +igetword_24373 igetword 0 24373 NULL
106236 +max_io_len_24384 max_io_len 0-1 24384 NULL
106237 +mpt_alloc_res_24387 mpt_alloc_res 5 24387 NULL
106238 +osc_cur_grant_bytes_seq_write_24396 osc_cur_grant_bytes_seq_write 3 24396 NULL
106239 +pvr2_v4l2_ioctl_24398 pvr2_v4l2_ioctl 2 24398 NULL nohasharray
106240 +getxattr_24398 getxattr 4 24398 &pvr2_v4l2_ioctl_24398 nohasharray
106241 +__btrfs_write_out_cache_24398 __btrfs_write_out_cache 0 24398 &getxattr_24398
106242 +blk_update_bidi_request_24415 blk_update_bidi_request 3-4 24415 NULL
106243 +nvme_trans_log_supp_pages_24418 nvme_trans_log_supp_pages 3 24418 NULL
106244 +b43_debugfs_read_24425 b43_debugfs_read 3 24425 NULL
106245 +xenbus_file_read_24427 xenbus_file_read 3 24427 NULL
106246 +ieee80211_rx_mgmt_beacon_24430 ieee80211_rx_mgmt_beacon 3 24430 NULL
106247 +copy_and_ioctl_24434 copy_and_ioctl 4 24434 NULL
106248 +ixgbe_alloc_q_vector_24439 ixgbe_alloc_q_vector 6-4 24439 NULL
106249 +smk_user_access_24440 smk_user_access 3 24440 NULL nohasharray
106250 +rtw_set_wps_assoc_resp_24440 rtw_set_wps_assoc_resp 3 24440 &smk_user_access_24440
106251 +__push_leaf_left_24456 __push_leaf_left 0 24456 NULL
106252 +evdev_do_ioctl_24459 evdev_do_ioctl 2 24459 NULL
106253 +lbs_highsnr_write_24460 lbs_highsnr_write 3 24460 NULL
106254 +skb_copy_and_csum_datagram_iovec_24466 skb_copy_and_csum_datagram_iovec 2 24466 NULL nohasharray
106255 +ocfs2_write_cluster_by_desc_24466 ocfs2_write_cluster_by_desc 6-5 24466 &skb_copy_and_csum_datagram_iovec_24466
106256 +snd_pcm_hw_param_first_24487 snd_pcm_hw_param_first 0 24487 NULL
106257 +read_file_spec_scan_ctl_24491 read_file_spec_scan_ctl 3 24491 NULL
106258 +i915_gem_obj_offset_24494 i915_gem_obj_offset 0 24494 NULL
106259 +push_nodes_for_insert_24496 push_nodes_for_insert 0 24496 NULL
106260 +pd_video_read_24510 pd_video_read 3 24510 NULL
106261 +request_key_with_auxdata_24515 request_key_with_auxdata 4 24515 NULL
106262 +xfs_buf_get_map_24522 xfs_buf_get_map 3 24522 NULL
106263 +do_mpage_readpage_24536 do_mpage_readpage 3 24536 NULL
106264 +write_cache_pages_24562 write_cache_pages 0 24562 NULL
106265 +SyS_pselect6_24582 SyS_pselect6 1 24582 NULL
106266 +udf_compute_nr_groups_24594 udf_compute_nr_groups 0 24594 NULL
106267 +sensor_hub_get_physical_device_count_24605 sensor_hub_get_physical_device_count 0 24605 NULL nohasharray
106268 +lov_alloc_memmd_24605 lov_alloc_memmd 2 24605 &sensor_hub_get_physical_device_count_24605
106269 +SyS_poll_24620 SyS_poll 2 24620 NULL
106270 +kvm_pv_enable_async_pf_24637 kvm_pv_enable_async_pf 2 24637 NULL
106271 +context_alloc_24645 context_alloc 3 24645 NULL
106272 +blk_rq_err_bytes_24650 blk_rq_err_bytes 0 24650 NULL
106273 +btrfs_check_data_free_space_24692 btrfs_check_data_free_space 0 24692 NULL
106274 +datafab_write_data_24696 datafab_write_data 4 24696 NULL
106275 +intelfbhw_get_p1p2_24703 intelfbhw_get_p1p2 2 24703 NULL
106276 +simple_attr_read_24738 simple_attr_read 3 24738 NULL
106277 +qla2x00_change_queue_depth_24742 qla2x00_change_queue_depth 2 24742 NULL
106278 +get_dma_residue_24749 get_dma_residue 0 24749 NULL
106279 +ocfs2_cow_file_pos_24751 ocfs2_cow_file_pos 3 24751 NULL
106280 +kgdb_hex2mem_24755 kgdb_hex2mem 3 24755 NULL
106281 +kempld_read8_24756 kempld_read8 0 24756 NULL
106282 +ocfs2_read_blocks_24777 ocfs2_read_blocks 0 24777 NULL
106283 +datablob_hmac_verify_24786 datablob_hmac_verify 4 24786 NULL
106284 +cache_read_24790 cache_read 3 24790 NULL
106285 +__xfs_get_blocks_24791 __xfs_get_blocks 2 24791 NULL
106286 +i915_gem_check_wedge_24793 i915_gem_check_wedge 0 24793 NULL
106287 +user_regset_copyout_24796 user_regset_copyout 7 24796 NULL
106288 +kvm_read_guest_virt_helper_24804 kvm_read_guest_virt_helper 3-1 24804 NULL
106289 +__next_cpu_nr_24805 __next_cpu_nr 1 24805 NULL
106290 +sb_set_blocksize_24807 sb_set_blocksize 0-2 24807 NULL
106291 +ath6kl_fwlog_mask_write_24810 ath6kl_fwlog_mask_write 3 24810 NULL
106292 +comedi_buf_alloc_24822 comedi_buf_alloc 3 24822 NULL
106293 +net2272_read_24825 net2272_read 0 24825 NULL
106294 +snd_als4k_gcr_read_24840 snd_als4k_gcr_read 0 24840 NULL
106295 +btrfs_next_old_item_24843 btrfs_next_old_item 0 24843 NULL
106296 +xfs_free_file_space_24853 xfs_free_file_space 2-3 24853 NULL
106297 +snd_pcm_lib_buffer_bytes_24865 snd_pcm_lib_buffer_bytes 0 24865 NULL
106298 +pnp_alloc_24869 pnp_alloc 1 24869 NULL nohasharray
106299 +l2cap_create_basic_pdu_24869 l2cap_create_basic_pdu 3 24869 &pnp_alloc_24869
106300 +setup_buffering_24872 setup_buffering 3 24872 NULL
106301 +queues_read_24877 queues_read 3 24877 NULL
106302 +__vxge_hw_vp_initialize_24885 __vxge_hw_vp_initialize 2 24885 NULL
106303 +codec_list_read_file_24910 codec_list_read_file 3 24910 NULL
106304 +__btrfs_free_extent_24927 __btrfs_free_extent 7-0 24927 NULL nohasharray
106305 +v4l2_ctrl_new_24927 v4l2_ctrl_new 7 24927 &__btrfs_free_extent_24927
106306 +ocfs2_fiemap_24949 ocfs2_fiemap 4-3 24949 NULL
106307 +packet_sendmsg_24954 packet_sendmsg 4 24954 NULL
106308 +ensure_wear_leveling_24971 ensure_wear_leveling 0 24971 NULL
106309 +twl_i2c_write_u8_24976 twl_i2c_write_u8 3 24976 NULL
106310 +llc_ui_sendmsg_24987 llc_ui_sendmsg 4 24987 NULL
106311 +btrfs_update_delayed_inode_24988 btrfs_update_delayed_inode 0 24988 NULL
106312 +key_conf_hw_key_idx_read_25003 key_conf_hw_key_idx_read 3 25003 NULL
106313 +il_dbgfs_channels_read_25005 il_dbgfs_channels_read 3 25005 NULL
106314 +twl6030_irq_unmap_25014 twl6030_irq_unmap 2 25014 NULL
106315 +ni_660x_num_counters_25031 ni_660x_num_counters 0 25031 NULL
106316 +nfs_dns_resolve_name_25036 nfs_dns_resolve_name 3 25036 NULL
106317 +load_unaligned_zeropad_25050 load_unaligned_zeropad 0 25050 NULL
106318 +btrfs_stack_key_blockptr_25058 btrfs_stack_key_blockptr 0 25058 NULL
106319 +gs_buf_alloc_25067 gs_buf_alloc 2 25067 NULL
106320 +ll_track_pid_seq_write_25068 ll_track_pid_seq_write 3 25068 NULL
106321 +SYSC_listxattr_25072 SYSC_listxattr 3 25072 NULL
106322 +iwl_dbgfs_tx_flush_write_25091 iwl_dbgfs_tx_flush_write 3 25091 NULL
106323 +gfs2_iter_unstuffed_25099 gfs2_iter_unstuffed 0 25099 NULL nohasharray
106324 +blkg_path_25099 blkg_path 3 25099 &gfs2_iter_unstuffed_25099
106325 +snd_rawmidi_kernel_write_25106 snd_rawmidi_kernel_write 3 25106 NULL
106326 +fs32_to_cpu_25143 fs32_to_cpu 0 25143 NULL
106327 +record_new_ref_25154 record_new_ref 0 25154 NULL
106328 +ipath_init_qp_table_25167 ipath_init_qp_table 2 25167 NULL nohasharray
106329 +btrfs_delete_delayed_items_25167 btrfs_delete_delayed_items 0 25167 &ipath_init_qp_table_25167
106330 +kvm_mmu_notifier_change_pte_25169 kvm_mmu_notifier_change_pte 3 25169 NULL
106331 +sctp_getsockopt_local_addrs_25178 sctp_getsockopt_local_addrs 2 25178 NULL
106332 +res_counter_read_u64_25180 res_counter_read_u64 0 25180 NULL
106333 +send_create_inode_if_needed_25211 send_create_inode_if_needed 0 25211 NULL
106334 +security_socket_post_create_25221 security_socket_post_create 0 25221 NULL
106335 +ocfs2_block_check_compute_25223 ocfs2_block_check_compute 2 25223 NULL
106336 +dtf_write_string_25232 dtf_write_string 5 25232 NULL
106337 +mon_stat_read_25238 mon_stat_read 3 25238 NULL
106338 +nilfs_palloc_find_available_slot_25245 nilfs_palloc_find_available_slot 5-3 25245 NULL
106339 +stripe_status_25259 stripe_status 5 25259 NULL
106340 +snd_pcm_start_25273 snd_pcm_start 0 25273 NULL
106341 +crypto_alloc_instance2_25277 crypto_alloc_instance2 3 25277 NULL
106342 +vfs_writev_25278 vfs_writev 3 25278 NULL
106343 +l2tp_session_create_25286 l2tp_session_create 1 25286 NULL
106344 +ath9k_debugfs_read_buf_25316 ath9k_debugfs_read_buf 3 25316 NULL
106345 +rng_buffer_size_25348 rng_buffer_size 0 25348 NULL
106346 +SYSC_kexec_load_25361 SYSC_kexec_load 2 25361 NULL
106347 +rio_destid_next_25368 rio_destid_next 2-0 25368 NULL nohasharray
106348 +unix_mkname_25368 unix_mkname 0-2 25368 &rio_destid_next_25368
106349 +sel_read_mls_25369 sel_read_mls 3 25369 NULL
106350 +tc3589x_gpio_to_irq_25371 tc3589x_gpio_to_irq 2 25371 NULL
106351 +orphanize_inode_25382 orphanize_inode 0 25382 NULL
106352 +SyS_splice_25390 SyS_splice 5 25390 NULL
106353 +btrfs_update_reserved_bytes_25393 btrfs_update_reserved_bytes 0 25393 NULL
106354 +vsp1_entity_init_25407 vsp1_entity_init 3 25407 NULL
106355 +ebt_buf_add_pad_25413 ebt_buf_add_pad 0 25413 NULL
106356 +dai_list_read_file_25421 dai_list_read_file 3 25421 NULL
106357 +generic_file_buffered_write_25464 generic_file_buffered_write 4 25464 NULL
106358 +ipath_decode_err_25468 ipath_decode_err 3 25468 NULL
106359 +crypto_hash_digestsize_25469 crypto_hash_digestsize 0 25469 NULL
106360 +ocfs2_hamming_encode_25501 ocfs2_hamming_encode 3 25501 NULL
106361 +ivtv_buf_copy_from_user_25502 ivtv_buf_copy_from_user 4-0 25502 NULL
106362 +evm_init_hmac_25504 evm_init_hmac 0 25504 NULL
106363 +snd_pcm_plugin_build_25505 snd_pcm_plugin_build 5 25505 NULL
106364 +sb_permission_25523 sb_permission 0 25523 NULL
106365 +down_read_failed_25532 down_read_failed 2 25532 NULL
106366 +ext3_get_inode_loc_25542 ext3_get_inode_loc 0 25542 NULL
106367 +ieee80211_if_read_path_refresh_time_25545 ieee80211_if_read_path_refresh_time 3 25545 NULL
106368 +wimax_addr_scnprint_25548 wimax_addr_scnprint 2 25548 NULL
106369 +mgag200_ttm_tt_create_25550 mgag200_ttm_tt_create 2 25550 NULL
106370 +ht_print_chan_25556 ht_print_chan 0 25556 NULL
106371 +skb_tailroom_25567 skb_tailroom 0 25567 NULL
106372 +ping_recvmsg_25597 ping_recvmsg 4 25597 NULL nohasharray
106373 +find_extend_vma_25597 find_extend_vma 2 25597 &ping_recvmsg_25597
106374 +copy_user_generic_25611 copy_user_generic 0 25611 NULL
106375 +proc_coredump_filter_write_25625 proc_coredump_filter_write 3 25625 NULL
106376 +__get_user_pages_25628 __get_user_pages 0-3-4 25628 NULL nohasharray
106377 +befs_utf2nls_25628 befs_utf2nls 3 25628 &__get_user_pages_25628
106378 +__direct_map_25647 __direct_map 6-5 25647 NULL
106379 +ext2_try_to_allocate_25667 ext2_try_to_allocate 4-2-0 25667 NULL
106380 +aircable_prepare_write_buffer_25669 aircable_prepare_write_buffer 3 25669 NULL
106381 +lpfc_idiag_cmd_get_25672 lpfc_idiag_cmd_get 2 25672 NULL
106382 +sta_inactive_ms_read_25690 sta_inactive_ms_read 3 25690 NULL
106383 +ebitmap_start_positive_25703 ebitmap_start_positive 0 25703 NULL
106384 +wl1271_tx_enabled_rates_get_25712 wl1271_tx_enabled_rates_get 0 25712 NULL nohasharray
106385 +rx_filter_mc_filter_read_25712 rx_filter_mc_filter_read 3 25712 &wl1271_tx_enabled_rates_get_25712
106386 +ibmasm_new_command_25714 ibmasm_new_command 2 25714 NULL
106387 +ttm_tt_init_25717 ttm_tt_init 3 25717 NULL
106388 +sel_write_context_25726 sel_write_context 3 25726 NULL nohasharray
106389 +__alloc_bootmem_low_node_25726 __alloc_bootmem_low_node 2 25726 &sel_write_context_25726
106390 +ext2_find_near_25734 ext2_find_near 0 25734 NULL
106391 +__set_clear_dirty_25744 __set_clear_dirty 2 25744 NULL
106392 +cxgbi_device_portmap_create_25747 cxgbi_device_portmap_create 3 25747 NULL
106393 +dtf_write_channel_25748 dtf_write_channel 3 25748 NULL
106394 +event_rx_pool_read_25792 event_rx_pool_read 3 25792 NULL
106395 +sg_read_25799 sg_read 3 25799 NULL
106396 +system_enable_read_25815 system_enable_read 3 25815 NULL
106397 +realloc_buffer_25816 realloc_buffer 2 25816 NULL
106398 +ftrace_profile_init_25821 ftrace_profile_init 0 25821 NULL
106399 +mthca_map_user_db_25823 mthca_map_user_db 5 25823 NULL
106400 +pwr_missing_bcns_read_25824 pwr_missing_bcns_read 3 25824 NULL
106401 +parport_read_25855 parport_read 0 25855 NULL
106402 +xfs_dir2_sf_hdr_size_25858 xfs_dir2_sf_hdr_size 0 25858 NULL
106403 +key_attr_size_25865 key_attr_size 0 25865 NULL
106404 +ath6kl_regread_read_25884 ath6kl_regread_read 3 25884 NULL
106405 +run_delalloc_nocow_25896 run_delalloc_nocow 3-4 25896 NULL
106406 +sisusbcon_scroll_area_25899 sisusbcon_scroll_area 4-3 25899 NULL
106407 +lpfc_change_queue_depth_25905 lpfc_change_queue_depth 2 25905 NULL
106408 +nvme_trans_mode_page_create_25908 nvme_trans_mode_page_create 7-4 25908 NULL
106409 +do_jffs2_setxattr_25910 do_jffs2_setxattr 5 25910 NULL
106410 +rcname_read_25919 rcname_read 3 25919 NULL
106411 +snd_es1938_capture_copy_25930 snd_es1938_capture_copy 5 25930 NULL
106412 +key_flags_read_25931 key_flags_read 3 25931 NULL
106413 +copy_play_buf_25932 copy_play_buf 3 25932 NULL
106414 +flush_25957 flush 2 25957 NULL
106415 +video_register_device_25971 video_register_device 3 25971 NULL
106416 +udp_setsockopt_25985 udp_setsockopt 5 25985 NULL
106417 +lustre_msg_buflen_v2_25997 lustre_msg_buflen_v2 0 25997 NULL
106418 +ebt_compat_entry_padsize_26001 ebt_compat_entry_padsize 0 26001 NULL
106419 +blk_init_queue_node_26015 blk_init_queue_node 3 26015 NULL
106420 +SyS_process_vm_readv_26019 SyS_process_vm_readv 3-5 26019 NULL
106421 +irq_create_strict_mappings_26025 irq_create_strict_mappings 2-4 26025 NULL
106422 +xfs_xattr_acl_set_26028 xfs_xattr_acl_set 4 26028 NULL
106423 +mptscsih_change_queue_depth_26036 mptscsih_change_queue_depth 2 26036 NULL
106424 +selinux_inode_post_setxattr_26037 selinux_inode_post_setxattr 4 26037 NULL
106425 +tun_do_read_26047 tun_do_read 5 26047 NULL
106426 +make_bfloat_26056 make_bfloat 2 26056 NULL
106427 +keyctl_update_key_26061 keyctl_update_key 3 26061 NULL
106428 +rx_rx_wa_density_dropped_frame_read_26095 rx_rx_wa_density_dropped_frame_read 3 26095 NULL
106429 +__add_inline_refs_26096 __add_inline_refs 0 26096 NULL
106430 +dup_ref_26100 dup_ref 0 26100 NULL
106431 +intel_wrap_ring_buffer_26117 intel_wrap_ring_buffer 0 26117 NULL
106432 +read_sb_page_26119 read_sb_page 5 26119 NULL
106433 +__fswab64_26155 __fswab64 0 26155 NULL
106434 +copy_oldmem_page_26164 copy_oldmem_page 3 26164 NULL
106435 +gfs2_xattr_acl_get_26166 gfs2_xattr_acl_get 0 26166 NULL nohasharray
106436 +ath6kl_roam_table_read_26166 ath6kl_roam_table_read 3 26166 &gfs2_xattr_acl_get_26166
106437 +disk_devt_26180 disk_devt 0 26180 NULL
106438 +cgroup_setxattr_26188 cgroup_setxattr 4 26188 NULL
106439 +ieee80211_if_fmt_dot11MeshTTL_26198 ieee80211_if_fmt_dot11MeshTTL 3 26198 NULL
106440 +xfs_idata_realloc_26199 xfs_idata_realloc 2 26199 NULL
106441 +mce_write_26201 mce_write 3 26201 NULL
106442 +SyS_splice_26206 SyS_splice 5 26206 NULL
106443 +mwifiex_regrdwr_write_26225 mwifiex_regrdwr_write 3 26225 NULL
106444 +_scsih_change_queue_depth_26230 _scsih_change_queue_depth 2 26230 NULL
106445 +rxrpc_recvmsg_26233 rxrpc_recvmsg 4 26233 NULL
106446 +bio_split_26235 bio_split 2 26235 NULL
106447 +btrfs_delayed_inode_reserve_metadata_26236 btrfs_delayed_inode_reserve_metadata 0 26236 NULL
106448 +ecryptfs_read_lower_page_segment_26244 ecryptfs_read_lower_page_segment 4 26244 NULL
106449 +do_setxattr_26264 do_setxattr 0 26264 NULL
106450 +crypto_ctxsize_26278 crypto_ctxsize 0 26278 NULL
106451 +apei_resources_request_26279 apei_resources_request 0 26279 NULL
106452 +wacom_set_device_mode_26280 wacom_set_device_mode 3 26280 NULL
106453 +ext2_find_goal_26306 ext2_find_goal 0 26306 NULL
106454 +snd_pcm_plug_client_channels_buf_26309 snd_pcm_plug_client_channels_buf 0-3 26309 NULL nohasharray
106455 +pax_get_random_long_26309 pax_get_random_long 0 26309 &snd_pcm_plug_client_channels_buf_26309
106456 +pwr_wake_on_host_read_26321 pwr_wake_on_host_read 3 26321 NULL
106457 +check_can_nocow_26336 check_can_nocow 2 26336 NULL
106458 +snd_vx_check_reg_bit_26344 snd_vx_check_reg_bit 0 26344 NULL
106459 +ocfs2_duplicate_clusters_by_page_26357 ocfs2_duplicate_clusters_by_page 5-0-6-3 26357 NULL
106460 +cifs_readdata_alloc_26360 cifs_readdata_alloc 1 26360 NULL
106461 +invalidate_inode_pages2_range_26403 invalidate_inode_pages2_range 0 26403 NULL
106462 +ntty_write_26404 ntty_write 3 26404 NULL
106463 +pagemap_read_26441 pagemap_read 3 26441 NULL
106464 +tower_read_26461 tower_read 3 26461 NULL nohasharray
106465 +enc_pools_add_pages_26461 enc_pools_add_pages 1 26461 &tower_read_26461 nohasharray
106466 +numa_cpu_node_26461 numa_cpu_node 0 26461 &enc_pools_add_pages_26461
106467 +memory_present_26470 memory_present 1 26470 NULL
106468 +ib_alloc_device_26483 ib_alloc_device 1 26483 NULL
106469 +ulong_write_file_26485 ulong_write_file 3 26485 NULL
106470 +dvb_ca_en50221_io_ioctl_26490 dvb_ca_en50221_io_ioctl 2 26490 NULL
106471 +vfio_pin_pages_26494 vfio_pin_pages 1 26494 NULL
106472 +read_vmcore_26501 read_vmcore 3 26501 NULL
106473 +uhid_char_write_26502 uhid_char_write 3 26502 NULL
106474 +vfio_pci_set_msi_trigger_26507 vfio_pci_set_msi_trigger 4-3 26507 NULL
106475 +iwl_dbgfs_rf_reset_read_26512 iwl_dbgfs_rf_reset_read 3 26512 NULL
106476 +SyS_rt_sigpending_26538 SyS_rt_sigpending 2 26538 NULL
106477 +rds_message_inc_copy_to_user_26540 rds_message_inc_copy_to_user 3 26540 NULL
106478 +__vhost_add_used_n_26554 __vhost_add_used_n 3 26554 NULL
106479 +qgroup_account_ref_step3_26558 qgroup_account_ref_step3 0 26558 NULL
106480 +dio_new_bio_26562 dio_new_bio 0 26562 NULL
106481 +kvm_iommu_put_pages_26571 kvm_iommu_put_pages 2 26571 NULL
106482 +rts51x_read_mem_26577 rts51x_read_mem 4 26577 NULL
106483 +regcache_set_reg_present_26598 regcache_set_reg_present 2 26598 NULL
106484 +__unmap_single_26604 __unmap_single 2-3 26604 NULL
106485 +dev_to_node_26617 dev_to_node 0 26617 NULL
106486 +iommu_alloc_26621 iommu_alloc 4 26621 NULL
106487 +pack_value_26625 pack_value 1 26625 NULL
106488 +pwr_fix_tsf_ps_read_26627 pwr_fix_tsf_ps_read 3 26627 NULL
106489 +drm_ht_find_item_26637 drm_ht_find_item 0 26637 NULL
106490 +process_extent_26646 process_extent 0 26646 NULL nohasharray
106491 +btrfs_lookup_inode_26646 btrfs_lookup_inode 0 26646 &process_extent_26646
106492 +irq_alloc_generic_chip_26650 irq_alloc_generic_chip 2 26650 NULL nohasharray
106493 +inb_p_26650 inb_p 0 26650 &irq_alloc_generic_chip_26650
106494 +usb_reset_device_26661 usb_reset_device 0 26661 NULL
106495 +cipso_v4_map_cat_rbm_hton_26680 cipso_v4_map_cat_rbm_hton 0 26680 NULL
106496 +nouveau_namedb_create__26732 nouveau_namedb_create_ 7 26732 NULL
106497 +SyS_fcntl_26737 SyS_fcntl 3 26737 NULL
106498 +pipeline_tcp_rx_stat_fifo_int_read_26745 pipeline_tcp_rx_stat_fifo_int_read 3 26745 NULL
106499 +bos_desc_26752 bos_desc 0 26752 NULL
106500 +snd_hda_get_raw_connections_26762 snd_hda_get_raw_connections 0 26762 NULL
106501 +dma_map_single_attrs_26779 dma_map_single_attrs 0 26779 NULL
106502 +cache_save_setup_26783 cache_save_setup 0 26783 NULL
106503 +qlcnic_alloc_sds_rings_26795 qlcnic_alloc_sds_rings 2 26795 NULL
106504 +cipso_v4_genopt_26812 cipso_v4_genopt 0 26812 NULL
106505 +iwl_trans_read_mem32_26825 iwl_trans_read_mem32 0 26825 NULL
106506 +smk_write_load_26829 smk_write_load 3 26829 NULL
106507 +ept_get_level1_sp_gpa_26831 ept_get_level1_sp_gpa 0 26831 NULL
106508 +__nodes_onto_26838 __nodes_onto 4 26838 NULL
106509 +scnprint_id_26842 scnprint_id 3-0 26842 NULL
106510 +ecryptfs_miscdev_write_26847 ecryptfs_miscdev_write 3 26847 NULL
106511 +tipc_conn_sendmsg_26867 tipc_conn_sendmsg 5 26867 NULL
106512 +ath6kl_create_qos_write_26879 ath6kl_create_qos_write 3 26879 NULL
106513 +svc_print_xprts_26881 svc_print_xprts 0 26881 NULL
106514 +cfg80211_process_auth_26916 cfg80211_process_auth 3 26916 NULL
106515 +x25_asy_change_mtu_26928 x25_asy_change_mtu 2 26928 NULL
106516 +compat_mtw_from_user_26932 compat_mtw_from_user 0 26932 NULL
106517 +scsi_tgt_copy_sense_26933 scsi_tgt_copy_sense 3 26933 NULL
106518 +pwr_ps_enter_read_26935 pwr_ps_enter_read 3 26935 NULL nohasharray
106519 +sctp_setsockopt_adaptation_layer_26935 sctp_setsockopt_adaptation_layer 3 26935 &pwr_ps_enter_read_26935
106520 +remove_from_bitmap_26939 remove_from_bitmap 0 26939 NULL
106521 +create_bm_block_list_26940 create_bm_block_list 0 26940 NULL
106522 +hecubafb_write_26942 hecubafb_write 3 26942 NULL
106523 +extract_entropy_user_26952 extract_entropy_user 3 26952 NULL nohasharray
106524 +do_trimming_26952 do_trimming 3 26952 &extract_entropy_user_26952
106525 +xlog_grant_push_ail_26972 xlog_grant_push_ail 2 26972 NULL
106526 +do_direct_IO_26979 do_direct_IO 0 26979 NULL
106527 +pcf857x_irq_domain_map_26998 pcf857x_irq_domain_map 2 26998 NULL
106528 +i2c_smbus_xfer_27006 i2c_smbus_xfer 0 27006 NULL
106529 +omfs_allocate_range_27034 omfs_allocate_range 3 27034 NULL
106530 +fill_read_buf_27036 fill_read_buf 0 27036 NULL
106531 +ufs_alloc_fragments_27059 ufs_alloc_fragments 3-0-2 27059 NULL
106532 +__videobuf_alloc_vb_27062 __videobuf_alloc_vb 1 27062 NULL
106533 +ext4_convert_unwritten_extents_27064 ext4_convert_unwritten_extents 4-3-0 27064 NULL
106534 +snd_pcm_lib_period_bytes_27071 snd_pcm_lib_period_bytes 0 27071 NULL
106535 +paravirt_read_msr_27077 paravirt_read_msr 0 27077 NULL
106536 +snd_mixer_oss_set_recsrc_27080 snd_mixer_oss_set_recsrc 2 27080 NULL
106537 +alloc_fdmem_27083 alloc_fdmem 1 27083 NULL
106538 +find_first_bit_27088 find_first_bit 0-2 27088 NULL
106539 +btmrvl_hscmd_write_27089 btmrvl_hscmd_write 3 27089 NULL
106540 +nes_reg_user_mr_27106 nes_reg_user_mr 2-3 27106 NULL
106541 +__devcgroup_inode_permission_27108 __devcgroup_inode_permission 0 27108 NULL
106542 +i915_gem_execbuffer_relocate_slow_27116 i915_gem_execbuffer_relocate_slow 0 27116 NULL
106543 +get_kernel_page_27133 get_kernel_page 0 27133 NULL
106544 +__ext4_handle_dirty_metadata_27137 __ext4_handle_dirty_metadata 0 27137 NULL
106545 +drbd_get_capacity_27141 drbd_get_capacity 0 27141 NULL
106546 +pms_capture_27142 pms_capture 4 27142 NULL
106547 +btmrvl_hscfgcmd_write_27143 btmrvl_hscfgcmd_write 3 27143 NULL
106548 +snd_compr_calc_avail_27165 snd_compr_calc_avail 0 27165 NULL
106549 +ieee80211_if_read_rc_rateidx_mask_5ghz_27183 ieee80211_if_read_rc_rateidx_mask_5ghz 3 27183 NULL
106550 +get_unaligned_be32_27184 get_unaligned_be32 0 27184 NULL
106551 +__intel_ring_begin_27197 __intel_ring_begin 0 27197 NULL
106552 +__sg_alloc_table_27198 __sg_alloc_table 0 27198 NULL
106553 +ocfs2_read_blocks_sync_27210 ocfs2_read_blocks_sync 0 27210 NULL
106554 +write_kmem_27225 write_kmem 3 27225 NULL
106555 +dbAllocAG_27228 dbAllocAG 0 27228 NULL
106556 +rxrpc_request_key_27235 rxrpc_request_key 3 27235 NULL
106557 +ocfs2_journal_access_path_27243 ocfs2_journal_access_path 0 27243 NULL
106558 +ll_track_gid_seq_write_27267 ll_track_gid_seq_write 3 27267 NULL
106559 +qfq_unblock_groups_27269 qfq_unblock_groups 2 27269 NULL
106560 +comedi_alloc_devpriv_27272 comedi_alloc_devpriv 2 27272 NULL
106561 +__dma_map_cont_27289 __dma_map_cont 5 27289 NULL
106562 +copy_from_buf_27308 copy_from_buf 2-4 27308 NULL
106563 +virtqueue_add_inbuf_27312 virtqueue_add_inbuf 3 27312 NULL
106564 +write_buf_27316 write_buf 0-3 27316 NULL
106565 +ocfs2_blocks_to_clusters_27327 ocfs2_blocks_to_clusters 0-2 27327 NULL
106566 +snd_pcm_oss_write2_27332 snd_pcm_oss_write2 3-0 27332 NULL
106567 +afs_cell_create_27346 afs_cell_create 2 27346 NULL
106568 +iwl_dbgfs_csr_write_27363 iwl_dbgfs_csr_write 3 27363 NULL
106569 +pcbit_stat_27364 pcbit_stat 2 27364 NULL
106570 +lz4_compress_crypto_27387 lz4_compress_crypto 3 27387 NULL
106571 +seq_read_27411 seq_read 3 27411 NULL
106572 +ib_dma_map_sg_27413 ib_dma_map_sg 0 27413 NULL nohasharray
106573 +zalloc_cpumask_var_node_27413 zalloc_cpumask_var_node 3 27413 &ib_dma_map_sg_27413
106574 +ieee80211_if_read_smps_27416 ieee80211_if_read_smps 3 27416 NULL
106575 +ocfs2_refcount_cal_cow_clusters_27422 ocfs2_refcount_cal_cow_clusters 0-3-4 27422 NULL nohasharray
106576 +evm_inode_init_security_27422 evm_inode_init_security 0 27422 &ocfs2_refcount_cal_cow_clusters_27422
106577 +cypress_write_27423 cypress_write 4 27423 NULL
106578 +sddr09_read_data_27447 sddr09_read_data 3 27447 NULL
106579 +ktime_to_us_27455 ktime_to_us 0 27455 NULL
106580 +sk_extract_addr_27474 sk_extract_addr 0 27474 NULL
106581 +v4l2_ctrl_new_std_menu_items_27487 v4l2_ctrl_new_std_menu_items 4 27487 NULL
106582 +hcd_buffer_alloc_27495 hcd_buffer_alloc 2 27495 NULL
106583 +qib_create_cq_27497 qib_create_cq 2 27497 NULL
106584 +ip_set_get_h32_27498 ip_set_get_h32 0 27498 NULL
106585 +btrfs_get_64_27499 btrfs_get_64 0 27499 NULL
106586 +ept_walk_addr_generic_27501 ept_walk_addr_generic 4 27501 NULL
106587 +garmin_read_process_27509 garmin_read_process 3 27509 NULL
106588 +oti_alloc_cookies_27510 oti_alloc_cookies 2 27510 NULL
106589 +ib_copy_to_udata_27525 ib_copy_to_udata 3 27525 NULL
106590 +snd_sonicvibes_getdmaa_27552 snd_sonicvibes_getdmaa 0 27552 NULL
106591 +SyS_fgetxattr_27571 SyS_fgetxattr 4 27571 NULL
106592 +sco_sock_recvmsg_27572 sco_sock_recvmsg 4 27572 NULL
106593 +libipw_alloc_txb_27579 libipw_alloc_txb 1 27579 NULL
106594 +read_flush_procfs_27642 read_flush_procfs 3 27642 NULL nohasharray
106595 +nl80211_send_connect_result_27642 nl80211_send_connect_result 5-7 27642 &read_flush_procfs_27642 nohasharray
106596 +ocfs2_direct_IO_27642 ocfs2_direct_IO 4 27642 &nl80211_send_connect_result_27642 nohasharray
106597 +ocfs2_xattr_ibody_get_27642 ocfs2_xattr_ibody_get 0 27642 &ocfs2_direct_IO_27642
106598 +add_new_gdb_27643 add_new_gdb 3 27643 NULL
106599 +btrfs_fallocate_27647 btrfs_fallocate 3-4 27647 NULL
106600 +qnx6_readpages_27657 qnx6_readpages 4 27657 NULL
106601 +cdrom_read_cdda_old_27664 cdrom_read_cdda_old 4 27664 NULL
106602 +record_changed_ref_27665 record_changed_ref 0 27665 NULL
106603 +iommu_pgsize_27677 iommu_pgsize 2-0 27677 NULL
106604 +ocfs2_extend_dir_27695 ocfs2_extend_dir 4 27695 NULL
106605 +fs_path_add_from_extent_buffer_27702 fs_path_add_from_extent_buffer 4-0 27702 NULL
106606 +evm_write_key_27715 evm_write_key 3 27715 NULL
106607 +ieee80211_if_fmt_dot11MeshGateAnnouncementProtocol_27722 ieee80211_if_fmt_dot11MeshGateAnnouncementProtocol 3 27722 NULL
106608 +xfs_dir2_block_sfsize_27727 xfs_dir2_block_sfsize 0 27727 NULL
106609 +macvtap_skb_to_vnet_hdr_27747 macvtap_skb_to_vnet_hdr 0 27747 NULL
106610 +SyS_setsockopt_27759 SyS_setsockopt 5 27759 NULL
106611 +gpiochip_export_27776 gpiochip_export 0 27776 NULL
106612 +__lov_setstripe_27782 __lov_setstripe 2 27782 NULL
106613 +twl4030_set_gpio_dataout_27792 twl4030_set_gpio_dataout 1 27792 NULL
106614 +DivaSTraceGetMemotyRequirement_27797 DivaSTraceGetMemotyRequirement 0-1 27797 NULL
106615 +ttm_object_file_init_27804 ttm_object_file_init 2 27804 NULL nohasharray
106616 +SyS_readv_27804 SyS_readv 3 27804 &ttm_object_file_init_27804
106617 +mpihelp_mul_27805 mpihelp_mul 5-3 27805 NULL
106618 +fwtty_buffer_rx_27821 fwtty_buffer_rx 3 27821 NULL
106619 +hpt374_read_freq_27828 hpt374_read_freq 0 27828 NULL
106620 +init_header_complete_27833 init_header_complete 0 27833 NULL
106621 +read_profile_27859 read_profile 3 27859 NULL
106622 +sky2_pci_read16_27863 sky2_pci_read16 0 27863 NULL
106623 +ieee80211_if_read_dot11MeshHWMProotInterval_27873 ieee80211_if_read_dot11MeshHWMProotInterval 3 27873 NULL
106624 +process_all_extents_27879 process_all_extents 0 27879 NULL
106625 +unix_seqpacket_sendmsg_27893 unix_seqpacket_sendmsg 4 27893 NULL
106626 +gluebi_write_27905 gluebi_write 3-2 27905 NULL
106627 +SyS_ptrace_27924 SyS_ptrace 3 27924 NULL
106628 +bm_find_next_27929 bm_find_next 2 27929 NULL
106629 +check_mapped_name_27943 check_mapped_name 3 27943 NULL
106630 +i915_gem_execbuffer_reserve_object_27956 i915_gem_execbuffer_reserve_object 0 27956 NULL
106631 +tracing_clock_write_27961 tracing_clock_write 3 27961 NULL
106632 +tipc_media_addr_printf_27971 tipc_media_addr_printf 2 27971 NULL
106633 +device_register_27972 device_register 0 27972 NULL nohasharray
106634 +mic_rx_pkts_read_27972 mic_rx_pkts_read 3 27972 &device_register_27972
106635 +pci_enable_device_flags_27977 pci_enable_device_flags 0 27977 NULL
106636 +f2fs_bio_alloc_27983 f2fs_bio_alloc 2 27983 NULL
106637 +edt_ft5x06_debugfs_raw_data_read_28002 edt_ft5x06_debugfs_raw_data_read 3 28002 NULL
106638 +snd_rawmidi_write_28008 snd_rawmidi_write 3 28008 NULL
106639 +remove_extent_data_ref_28023 remove_extent_data_ref 0 28023 NULL
106640 +alloc_one_pg_vec_page_28031 alloc_one_pg_vec_page 1 28031 NULL
106641 +record_deleted_ref_28039 record_deleted_ref 0 28039 NULL
106642 +sctp_setsockopt_maxburst_28041 sctp_setsockopt_maxburst 3 28041 NULL
106643 +rts51x_xd_rw_28046 rts51x_xd_rw 3-4 28046 NULL
106644 +p9_fd_write_28050 p9_fd_write 3 28050 NULL
106645 +cx231xx_init_vbi_isoc_28053 cx231xx_init_vbi_isoc 3-2-4 28053 NULL
106646 +pool_status_28055 pool_status 5 28055 NULL
106647 +init_rs_non_canonical_28059 init_rs_non_canonical 1 28059 NULL
106648 +lpfc_idiag_mbxacc_read_28061 lpfc_idiag_mbxacc_read 3 28061 NULL
106649 +tx_frag_bad_mblk_num_read_28064 tx_frag_bad_mblk_num_read 3 28064 NULL
106650 +ext4_read_block_bitmap_nowait_28078 ext4_read_block_bitmap_nowait 2 28078 NULL
106651 +platform_get_irq_28088 platform_get_irq 0 28088 NULL
106652 +SyS_brk_28096 SyS_brk 1 28096 NULL
106653 +mmc_test_alloc_mem_28102 mmc_test_alloc_mem 2-3 28102 NULL
106654 +rx_defrag_need_defrag_read_28117 rx_defrag_need_defrag_read 3 28117 NULL
106655 +vgacon_adjust_height_28124 vgacon_adjust_height 2 28124 NULL
106656 +tipc_msg_init_28128 tipc_msg_init 4-2 28128 NULL
106657 +video_read_28148 video_read 3 28148 NULL
106658 +snd_midi_channel_alloc_set_28153 snd_midi_channel_alloc_set 1 28153 NULL
106659 +stats_dot11FCSErrorCount_read_28154 stats_dot11FCSErrorCount_read 3 28154 NULL
106660 +__add_reloc_root_28155 __add_reloc_root 0 28155 NULL
106661 +vread_28173 vread 0-3 28173 NULL
106662 +macvtap_get_user_28185 macvtap_get_user 4 28185 NULL
106663 +counter_free_res_28187 counter_free_res 5 28187 NULL
106664 +read_disk_sb_28188 read_disk_sb 2 28188 NULL
106665 +nouveau_mxm_create__28200 nouveau_mxm_create_ 4 28200 NULL
106666 +__exclude_logged_extent_28212 __exclude_logged_extent 2-0 28212 NULL
106667 +__qp_memcpy_from_queue_28220 __qp_memcpy_from_queue 3-4 28220 NULL
106668 +ocfs2_cow_sync_writeback_28221 ocfs2_cow_sync_writeback 0 28221 NULL
106669 +line6_alloc_sysex_buffer_28225 line6_alloc_sysex_buffer 4 28225 NULL
106670 +amd_nb_num_28228 amd_nb_num 0 28228 NULL
106671 +ext4_validate_block_bitmap_28243 ext4_validate_block_bitmap 3 28243 NULL
106672 +fuse_direct_IO_28275 fuse_direct_IO 4 28275 NULL
106673 +usemap_size_28281 usemap_size 0 28281 NULL
106674 +inline_xattr_size_28285 inline_xattr_size 0 28285 NULL
106675 +dma_map_sg_attrs_28289 dma_map_sg_attrs 0 28289 NULL
106676 +SyS_ppoll_28290 SyS_ppoll 2 28290 NULL
106677 +kstrtos16_from_user_28300 kstrtos16_from_user 2 28300 NULL
106678 +acpi_register_gsi_xen_28305 acpi_register_gsi_xen 2 28305 NULL nohasharray
106679 +nouveau_compat_ioctl_28305 nouveau_compat_ioctl 2 28305 &acpi_register_gsi_xen_28305
106680 +create_io_context_28306 create_io_context 2 28306 NULL
106681 +__mlock_vma_pages_range_28315 __mlock_vma_pages_range 2-3-0 28315 NULL
106682 +snd_pcm_oss_read_28317 snd_pcm_oss_read 3 28317 NULL
106683 +bm_entry_write_28338 bm_entry_write 3 28338 NULL
106684 +tcp_copy_to_iovec_28344 tcp_copy_to_iovec 3 28344 NULL
106685 +cpuset_spread_node_28351 cpuset_spread_node 0 28351 NULL nohasharray
106686 +snapshot_write_28351 snapshot_write 3 28351 &cpuset_spread_node_28351
106687 +xfs_iomap_write_unwritten_28365 xfs_iomap_write_unwritten 3-2 28365 NULL
106688 +dlmfs_file_read_28385 dlmfs_file_read 3 28385 NULL
106689 +tx_frag_cache_miss_read_28394 tx_frag_cache_miss_read 3 28394 NULL
106690 +bypass_pwup_write_28416 bypass_pwup_write 3 28416 NULL
106691 +subdev_ioctl_28417 subdev_ioctl 2 28417 NULL
106692 +get_extent_allocation_hint_28423 get_extent_allocation_hint 0 28423 NULL
106693 +ksocknal_alloc_tx_28426 ksocknal_alloc_tx 2 28426 NULL
106694 +mpage_readpages_28436 mpage_readpages 3 28436 NULL
106695 +snd_emu10k1_efx_read_28452 snd_emu10k1_efx_read 2 28452 NULL
106696 +key_mic_failures_read_28457 key_mic_failures_read 3 28457 NULL
106697 +alloc_irq_cpu_rmap_28459 alloc_irq_cpu_rmap 1 28459 NULL
106698 +vmw_du_crtc_cursor_set_28479 vmw_du_crtc_cursor_set 4-5 28479 NULL
106699 +ocfs2_backup_super_blkno_28484 ocfs2_backup_super_blkno 0-2 28484 NULL
106700 +__filemap_fdatawrite_28485 __filemap_fdatawrite 0 28485 NULL
106701 +clear_discard_28494 clear_discard 2 28494 NULL
106702 +ps_poll_upsd_utilization_read_28519 ps_poll_upsd_utilization_read 3 28519 NULL
106703 +__next_node_28521 __next_node 1-0 28521 NULL
106704 +i2400m_tx_stats_read_28527 i2400m_tx_stats_read 3 28527 NULL
106705 +sel_read_policycap_28544 sel_read_policycap 3 28544 NULL
106706 +run_delalloc_range_28545 run_delalloc_range 3-4 28545 NULL nohasharray
106707 +mptctl_getiocinfo_28545 mptctl_getiocinfo 2 28545 &run_delalloc_range_28545 nohasharray
106708 +aio_read_events_28545 aio_read_events 3 28545 &mptctl_getiocinfo_28545
106709 +sysfs_create_bin_file_28551 sysfs_create_bin_file 0 28551 NULL
106710 +b43legacy_debugfs_write_28556 b43legacy_debugfs_write 3 28556 NULL
106711 +i2o_msg_post_wait_mem_28558 i2o_msg_post_wait_mem 0 28558 NULL
106712 +asymmetric_verify_28567 asymmetric_verify 3 28567 NULL
106713 +oxygen_read32_28582 oxygen_read32 0 28582 NULL
106714 +ocfs2_read_dir_block_28587 ocfs2_read_dir_block 2 28587 NULL
106715 +extract_entropy_28604 extract_entropy 5-3 28604 NULL
106716 +kfifo_unused_28612 kfifo_unused 0 28612 NULL
106717 +mp_override_legacy_irq_28618 mp_override_legacy_irq 4 28618 NULL
106718 +snd_nm256_capture_copy_28622 snd_nm256_capture_copy 5-3 28622 NULL
106719 +setup_usemap_28636 setup_usemap 3-4 28636 NULL
106720 +qib_handle_6120_hwerrors_28642 qib_handle_6120_hwerrors 3 28642 NULL
106721 +p9_fcall_alloc_28652 p9_fcall_alloc 1 28652 NULL
106722 +read_nic_io_byte_28654 read_nic_io_byte 0 28654 NULL
106723 +btrfs_previous_item_28667 btrfs_previous_item 0 28667 NULL
106724 +blk_queue_resize_tags_28670 blk_queue_resize_tags 2 28670 NULL
106725 +__perf_sw_event_28684 __perf_sw_event 2 28684 NULL
106726 +SyS_setgroups16_28686 SyS_setgroups16 1 28686 NULL
106727 +kvm_mmu_get_page_28692 kvm_mmu_get_page 2 28692 NULL
106728 +balance_level_28707 balance_level 0 28707 NULL
106729 +drm_plane_init_28731 drm_plane_init 6 28731 NULL
106730 +alloc_irq_cfg_28734 alloc_irq_cfg 2 28734 NULL
106731 +spi_execute_28736 spi_execute 5 28736 NULL
106732 +snd_pcm_aio_write_28738 snd_pcm_aio_write 3 28738 NULL
106733 +read_file_btcoex_28743 read_file_btcoex 3 28743 NULL
106734 +max_hw_blocks_28748 max_hw_blocks 0 28748 NULL
106735 +rpc_pipe_generic_upcall_28766 rpc_pipe_generic_upcall 4 28766 NULL
106736 +atomic_inc_return_unchecked_28778 atomic_inc_return_unchecked 0 28778 NULL
106737 +ath6kl_get_num_reg_28780 ath6kl_get_num_reg 0 28780 NULL
106738 +btrfs_block_rsv_refill_28800 btrfs_block_rsv_refill 3 28800 NULL nohasharray
106739 +sel_write_member_28800 sel_write_member 3 28800 &btrfs_block_rsv_refill_28800
106740 +cgroup_file_read_28804 cgroup_file_read 3 28804 NULL
106741 +memory_bm_create_28814 memory_bm_create 0 28814 NULL
106742 +iwl_dbgfs_rxon_filter_flags_read_28832 iwl_dbgfs_rxon_filter_flags_read 3 28832 NULL
106743 +vp_request_msix_vectors_28849 vp_request_msix_vectors 2 28849 NULL
106744 +ipv6_renew_options_28867 ipv6_renew_options 5 28867 NULL
106745 +max_io_len_target_boundary_28879 max_io_len_target_boundary 0-1 28879 NULL
106746 +packet_sendmsg_spkt_28885 packet_sendmsg_spkt 4 28885 NULL
106747 +to_cblock_28899 to_cblock 0-1 28899 NULL
106748 +da9055_group_write_28904 da9055_group_write 2-3 28904 NULL
106749 +ps_upsd_timeouts_read_28924 ps_upsd_timeouts_read 3 28924 NULL
106750 +iwl_dbgfs_sleep_level_override_write_28925 iwl_dbgfs_sleep_level_override_write 3 28925 NULL
106751 +ocfs2_frozen_trigger_28929 ocfs2_frozen_trigger 4 28929 NULL
106752 +push_rx_28939 push_rx 3 28939 NULL
106753 +btrfs_trim_block_group_28963 btrfs_trim_block_group 4-3 28963 NULL
106754 +alloc_sched_domains_28972 alloc_sched_domains 1 28972 NULL
106755 +remap_pfn_range_28976 remap_pfn_range 3-4-2 28976 NULL
106756 +ext4_mb_add_groupinfo_28988 ext4_mb_add_groupinfo 2 28988 NULL
106757 +hci_sock_setsockopt_28993 hci_sock_setsockopt 5 28993 NULL
106758 +bin_uuid_28999 bin_uuid 3 28999 NULL
106759 +fd_execute_rw_29004 fd_execute_rw 3 29004 NULL nohasharray
106760 +offset_to_bitmap_29004 offset_to_bitmap 2 29004 &fd_execute_rw_29004
106761 +xz_dec_init_29029 xz_dec_init 2 29029 NULL
106762 +i915_gem_object_bind_to_vm_29035 i915_gem_object_bind_to_vm 0 29035 NULL
106763 +ieee80211_if_read_ht_opmode_29044 ieee80211_if_read_ht_opmode 3 29044 NULL
106764 +ProcessGetHostMibs_29049 ProcessGetHostMibs 0 29049 NULL nohasharray
106765 +rxrpc_sendmsg_29049 rxrpc_sendmsg 4 29049 &ProcessGetHostMibs_29049
106766 +btrfs_root_bytenr_29058 btrfs_root_bytenr 0 29058 NULL
106767 +iso_packets_buffer_init_29061 iso_packets_buffer_init 3-4 29061 NULL
106768 +roundup_64_29066 roundup_64 2-0-1 29066 NULL
106769 +lpfc_idiag_extacc_drivr_get_29067 lpfc_idiag_extacc_drivr_get 0-3 29067 NULL
106770 +sctp_getsockopt_assoc_stats_29074 sctp_getsockopt_assoc_stats 2 29074 NULL
106771 +iwl_dbgfs_log_event_write_29088 iwl_dbgfs_log_event_write 3 29088 NULL
106772 +i915_error_object_create_sized_29091 i915_error_object_create_sized 3 29091 NULL
106773 +init_cache_node_node_29092 init_cache_node_node 1 29092 NULL
106774 +isdn_ppp_write_29109 isdn_ppp_write 4 29109 NULL
106775 +snprintf_29125 snprintf 0 29125 NULL
106776 +iov_shorten_29130 iov_shorten 0 29130 NULL
106777 +proc_scsi_write_29142 proc_scsi_write 3 29142 NULL
106778 +alloc_irqs_from_29152 alloc_irqs_from 1-2-0-3 29152 NULL
106779 +kvm_mmu_notifier_clear_flush_young_29154 kvm_mmu_notifier_clear_flush_young 3 29154 NULL
106780 +drm_property_create_enum_29201 drm_property_create_enum 5 29201 NULL
106781 +wusb_prf_256_29203 wusb_prf_256 7 29203 NULL
106782 +__mm_populate_29204 __mm_populate 1-2 29204 NULL
106783 +do_shrinker_shrink_29208 do_shrinker_shrink 0 29208 NULL
106784 +rds_iw_inc_copy_to_user_29214 rds_iw_inc_copy_to_user 3 29214 NULL
106785 +iwl_dbgfs_temperature_read_29224 iwl_dbgfs_temperature_read 3 29224 NULL
106786 +nvme_trans_copy_from_user_29227 nvme_trans_copy_from_user 3 29227 NULL
106787 +irq_domain_add_linear_29236 irq_domain_add_linear 2 29236 NULL
106788 +recover_peb_29238 recover_peb 0-7-6 29238 NULL
106789 +evdev_handle_get_val_29242 evdev_handle_get_val 5-6 29242 NULL
106790 +rw_verify_area_29243 rw_verify_area 0-4 29243 NULL
106791 +security_context_to_sid_core_29248 security_context_to_sid_core 2 29248 NULL
106792 +block_div_29268 block_div 0-1-2 29268 NULL
106793 +prism2_set_genericelement_29277 prism2_set_genericelement 3 29277 NULL
106794 +bitmap_ord_to_pos_29279 bitmap_ord_to_pos 3 29279 NULL
106795 +ext4_fiemap_29296 ext4_fiemap 4 29296 NULL
106796 +sn9c102_read_29305 sn9c102_read 3 29305 NULL
106797 +replace_file_extents_29312 replace_file_extents 0 29312 NULL
106798 +__fuse_get_req_29315 __fuse_get_req 2 29315 NULL
106799 +lprocfs_write_helper_29323 lprocfs_write_helper 2 29323 NULL
106800 +kvm_handle_hva_29326 kvm_handle_hva 2 29326 NULL
106801 +tun_put_user_29337 tun_put_user 5 29337 NULL
106802 +__alloc_ei_netdev_29338 __alloc_ei_netdev 1 29338 NULL
106803 +ide_read_altstatus_29343 ide_read_altstatus 0 29343 NULL
106804 +l2cap_sock_setsockopt_old_29346 l2cap_sock_setsockopt_old 4 29346 NULL
106805 +gpiochip_find_base_29366 gpiochip_find_base 0-1 29366 NULL
106806 +ktime_us_delta_29375 ktime_us_delta 0 29375 NULL
106807 +mwifiex_cfg80211_mgmt_tx_29387 mwifiex_cfg80211_mgmt_tx 7 29387 NULL
106808 +pca953x_irq_setup_29407 pca953x_irq_setup 3 29407 NULL
106809 +btrfs_run_delayed_items_29428 btrfs_run_delayed_items 0 29428 NULL
106810 +mempool_create_29437 mempool_create 1 29437 NULL
106811 +crypto_ahash_alignmask_29445 crypto_ahash_alignmask 0 29445 NULL
106812 +p9_client_prepare_req_29448 p9_client_prepare_req 3 29448 NULL
106813 +ept_page_fault_29450 ept_page_fault 2 29450 NULL
106814 +__kmalloc_node_29455 __kmalloc_node 3 29455 NULL
106815 +validate_scan_freqs_29462 validate_scan_freqs 0 29462 NULL
106816 +SyS_flistxattr_29474 SyS_flistxattr 3 29474 NULL
106817 +do_register_entry_29478 do_register_entry 4 29478 NULL
106818 +simple_strtoul_29480 simple_strtoul 0 29480 NULL
106819 +sched_clock_local_29498 sched_clock_local 0 29498 NULL
106820 +btmrvl_pscmd_write_29504 btmrvl_pscmd_write 3 29504 NULL
106821 +btrfs_file_extent_disk_bytenr_29505 btrfs_file_extent_disk_bytenr 0 29505 NULL
106822 +write_file_regidx_29517 write_file_regidx 3 29517 NULL
106823 +atk_debugfs_ggrp_read_29522 atk_debugfs_ggrp_read 3 29522 NULL
106824 +_regmap_raw_write_29541 _regmap_raw_write 2-4 29541 NULL
106825 +vfs_read_29543 vfs_read 0-3 29543 NULL
106826 +set_brk_29551 set_brk 1-2 29551 NULL nohasharray
106827 +ftrace_write_29551 ftrace_write 3 29551 &set_brk_29551
106828 +idetape_queue_rw_tail_29562 idetape_queue_rw_tail 3 29562 NULL
106829 +leaf_dealloc_29566 leaf_dealloc 3 29566 NULL
106830 +kvm_read_guest_virt_system_29569 kvm_read_guest_virt_system 4-2 29569 NULL
106831 +p9_fd_read_29570 p9_fd_read 3 29570 NULL
106832 +lbs_lowsnr_read_29571 lbs_lowsnr_read 3 29571 NULL
106833 +iwl_dbgfs_missed_beacon_write_29586 iwl_dbgfs_missed_beacon_write 3 29586 NULL
106834 +pvr2_hdw_report_unlocked_29589 pvr2_hdw_report_unlocked 4-0 29589 NULL
106835 +dio_set_defer_completion_29599 dio_set_defer_completion 0 29599 NULL
106836 +slots_per_page_29601 slots_per_page 0 29601 NULL
106837 +osc_cached_mb_seq_write_29610 osc_cached_mb_seq_write 3 29610 NULL
106838 +nla_get_u16_29624 nla_get_u16 0 29624 NULL
106839 +tx_frag_cache_hit_read_29639 tx_frag_cache_hit_read 3 29639 NULL
106840 +sctp_make_abort_user_29654 sctp_make_abort_user 3 29654 NULL
106841 +sisusb_write_mem_bulk_29678 sisusb_write_mem_bulk 4 29678 NULL
106842 +tracepoint_probe_register_29688 tracepoint_probe_register 0 29688 NULL
106843 +__btrfs_setxattr_29689 __btrfs_setxattr 0 29689 NULL
106844 +jbd2_journal_restart_29692 jbd2_journal_restart 0 29692 NULL
106845 +lustre_posix_acl_xattr_2ext_29693 lustre_posix_acl_xattr_2ext 2 29693 NULL
106846 +posix_acl_from_xattr_29708 posix_acl_from_xattr 3 29708 NULL
106847 +probes_write_29711 probes_write 3 29711 NULL
106848 +read_cis_cache_29735 read_cis_cache 4 29735 NULL
106849 +xfs_new_eof_29737 xfs_new_eof 2 29737 NULL
106850 +std_nic_write_29752 std_nic_write 3 29752 NULL
106851 +dbAlloc_29794 dbAlloc 0 29794 NULL
106852 +ext4_trim_all_free_29806 ext4_trim_all_free 4-3-2 29806 NULL
106853 +tcp_sendpage_29829 tcp_sendpage 4 29829 NULL
106854 +scan_bitmap_block_29840 scan_bitmap_block 4 29840 NULL
106855 +__probe_kernel_write_29842 __probe_kernel_write 3 29842 NULL
106856 +kvm_read_hva_atomic_29848 kvm_read_hva_atomic 3 29848 NULL
106857 +count_partial_29850 count_partial 0 29850 NULL
106858 +radeon_ttm_tt_create_29859 radeon_ttm_tt_create 2 29859 NULL
106859 +solo_enc_alloc_29860 solo_enc_alloc 3 29860 NULL
106860 +ipv6_setsockopt_29871 ipv6_setsockopt 5 29871 NULL
106861 +tlv_put_29872 tlv_put 0 29872 NULL
106862 +scsi_end_request_29876 scsi_end_request 3 29876 NULL
106863 +changed_extent_29881 changed_extent 0 29881 NULL
106864 +crypto_aead_alignmask_29885 crypto_aead_alignmask 0 29885 NULL
106865 +lov_ost_pool_extend_29914 lov_ost_pool_extend 2 29914 NULL
106866 +write_file_queue_29922 write_file_queue 3 29922 NULL
106867 +ext4_xattr_set_acl_29930 ext4_xattr_set_acl 4 29930 NULL
106868 +__btrfs_getxattr_29947 __btrfs_getxattr 0 29947 NULL nohasharray
106869 +ipv6_recv_error_29947 ipv6_recv_error 3 29947 &__btrfs_getxattr_29947
106870 +diva_os_get_context_size_29983 diva_os_get_context_size 0 29983 NULL
106871 +arch_setup_dmar_msi_29992 arch_setup_dmar_msi 1 29992 NULL
106872 +vmci_host_setup_notify_30002 vmci_host_setup_notify 2 30002 NULL
106873 +dev_mem_write_30028 dev_mem_write 3 30028 NULL
106874 +alloc_netdev_mqs_30030 alloc_netdev_mqs 1 30030 NULL
106875 +scsi_vpd_inquiry_30040 scsi_vpd_inquiry 4 30040 NULL
106876 +drp_wmove_30043 drp_wmove 4 30043 NULL nohasharray
106877 +wrmalt_30043 wrmalt 0 30043 &drp_wmove_30043
106878 +mem_cgroup_charge_common_30047 mem_cgroup_charge_common 0 30047 NULL
106879 +SyS_write_30059 SyS_write 3 30059 NULL
106880 +cxgbi_ddp_reserve_30091 cxgbi_ddp_reserve 4 30091 NULL
106881 +snd_midi_channel_init_set_30092 snd_midi_channel_init_set 1 30092 NULL
106882 +rx_filter_data_filter_read_30098 rx_filter_data_filter_read 3 30098 NULL
106883 +defragment_dma_buffer_30113 defragment_dma_buffer 0 30113 NULL
106884 +spi_async_locked_30117 spi_async_locked 0 30117 NULL
106885 +calgary_unmap_page_30130 calgary_unmap_page 3-2 30130 NULL
106886 +recv_stream_30138 recv_stream 4 30138 NULL
106887 +u_memcpya_30139 u_memcpya 3-2 30139 NULL
106888 +elfcorehdr_read_30159 elfcorehdr_read 2 30159 NULL
106889 +alloc_switch_ctx_30165 alloc_switch_ctx 2 30165 NULL
106890 +expand_inode_data_30169 expand_inode_data 2-3 30169 NULL
106891 +btrfs_start_transaction_lflush_30178 btrfs_start_transaction_lflush 2 30178 NULL
106892 +mempool_create_page_pool_30189 mempool_create_page_pool 1 30189 NULL
106893 +drm_property_create_bitmask_30195 drm_property_create_bitmask 5 30195 NULL
106894 +snd_pcm_playback_forward_30201 snd_pcm_playback_forward 0-2 30201 NULL
106895 +usblp_ioctl_30203 usblp_ioctl 2 30203 NULL
106896 +read_4k_modal_eeprom_30212 read_4k_modal_eeprom 3 30212 NULL
106897 +bitmap_file_set_bit_30228 bitmap_file_set_bit 2 30228 NULL
106898 +shmem_unuse_inode_30263 shmem_unuse_inode 0 30263 NULL
106899 +rawv6_recvmsg_30265 rawv6_recvmsg 4 30265 NULL
106900 +hfsplus_trusted_setxattr_30270 hfsplus_trusted_setxattr 4 30270 NULL
106901 +isr_pci_pm_read_30271 isr_pci_pm_read 3 30271 NULL
106902 +compat_readv_30273 compat_readv 3 30273 NULL
106903 +lapic_register_intr_30279 lapic_register_intr 1 30279 NULL
106904 +skcipher_sendmsg_30290 skcipher_sendmsg 4 30290 NULL
106905 +pipeline_sec_frag_swi_read_30294 pipeline_sec_frag_swi_read 3 30294 NULL
106906 +tcp_sendmsg_30296 tcp_sendmsg 4 30296 NULL
106907 +osc_contention_seconds_seq_write_30305 osc_contention_seconds_seq_write 3 30305 NULL
106908 +drm_core_get_reg_ofs_30309 drm_core_get_reg_ofs 0 30309 NULL
106909 +ext4_acl_from_disk_30320 ext4_acl_from_disk 2 30320 NULL
106910 +i8254_read_30330 i8254_read 0 30330 NULL
106911 +generic_ptrace_pokedata_30338 generic_ptrace_pokedata 2 30338 NULL
106912 +resource_from_user_30341 resource_from_user 3 30341 NULL
106913 +o2nm_this_node_30342 o2nm_this_node 0 30342 NULL
106914 +__vmalloc_node_flags_30352 __vmalloc_node_flags 1-2 30352 NULL
106915 +kstrtou32_from_user_30361 kstrtou32_from_user 2 30361 NULL
106916 +inet_getid_30365 inet_getid 2 30365 NULL
106917 +C_SYSC_readv_30369 C_SYSC_readv 3 30369 NULL
106918 +blkdev_issue_zeroout_30392 blkdev_issue_zeroout 3-0 30392 NULL
106919 +c4iw_init_resource_30393 c4iw_init_resource 2-3 30393 NULL
106920 +get_kernel_pages_30397 get_kernel_pages 0 30397 NULL
106921 +_drbd_bm_find_next_zero_30415 _drbd_bm_find_next_zero 2 30415 NULL
106922 +ext3_xattr_list_30419 ext3_xattr_list 3 30419 NULL
106923 +vb2_fop_write_30420 vb2_fop_write 3 30420 NULL
106924 +tx_tx_template_prepared_read_30424 tx_tx_template_prepared_read 3 30424 NULL
106925 +mq_create_30425 mq_create 1 30425 NULL nohasharray
106926 +lstcon_session_info_30425 lstcon_session_info 6 30425 &mq_create_30425
106927 +ext4_ext_create_new_leaf_30428 ext4_ext_create_new_leaf 0 30428 NULL
106928 +enable_write_30456 enable_write 3 30456 NULL
106929 +pci_resource_alignment_30457 pci_resource_alignment 0 30457 NULL
106930 +tx_tx_template_programmed_read_30461 tx_tx_template_programmed_read 3 30461 NULL
106931 +urandom_read_30462 urandom_read 3 30462 NULL
106932 +zoran_ioctl_30465 zoran_ioctl 2 30465 NULL
106933 +i2c_ctrl_read_30467 i2c_ctrl_read 0 30467 NULL
106934 +send_rmdir_30470 send_rmdir 0 30470 NULL
106935 +i915_mutex_lock_interruptible_30474 i915_mutex_lock_interruptible 0 30474 NULL nohasharray
106936 +ocrdma_reg_user_mr_30474 ocrdma_reg_user_mr 2-3 30474 &i915_mutex_lock_interruptible_30474
106937 +write_head_30481 write_head 4 30481 NULL
106938 +adu_write_30487 adu_write 3 30487 NULL
106939 +dtim_interval_write_30489 dtim_interval_write 3 30489 NULL
106940 +btrfs_free_extent_30515 btrfs_free_extent 0 30515 NULL
106941 +dwc3_testmode_write_30516 dwc3_testmode_write 3 30516 NULL
106942 +debug_debug2_read_30526 debug_debug2_read 3 30526 NULL nohasharray
106943 +set_config_30526 set_config 0 30526 &debug_debug2_read_30526
106944 +disk_expand_part_tbl_30561 disk_expand_part_tbl 2 30561 NULL
106945 +set_le_30581 set_le 4 30581 NULL
106946 +from_cblock_30582 from_cblock 0-1 30582 NULL
106947 +blk_init_tags_30592 blk_init_tags 1 30592 NULL
106948 +i2c_hid_get_report_length_30598 i2c_hid_get_report_length 0 30598 NULL
106949 +sgl_map_user_pages_30610 sgl_map_user_pages 2-3-4 30610 NULL
106950 +SyS_msgrcv_30611 SyS_msgrcv 3 30611 NULL
106951 +macvtap_sendmsg_30629 macvtap_sendmsg 4 30629 NULL
106952 +ieee80211_if_read_dot11MeshAwakeWindowDuration_30631 ieee80211_if_read_dot11MeshAwakeWindowDuration 3 30631 NULL
106953 +compat_raw_setsockopt_30634 compat_raw_setsockopt 5 30634 NULL
106954 +mlx5_ib_alloc_fast_reg_page_list_30638 mlx5_ib_alloc_fast_reg_page_list 2 30638 NULL
106955 +SyS_listxattr_30647 SyS_listxattr 3 30647 NULL
106956 +agp_remap_30665 agp_remap 2 30665 NULL
106957 +jffs2_flash_read_30667 jffs2_flash_read 0 30667 NULL
106958 +ni_ai_fifo_read_30681 ni_ai_fifo_read 3 30681 NULL
106959 +dccp_setsockopt_ccid_30701 dccp_setsockopt_ccid 4 30701 NULL
106960 +lbs_wrbbp_write_30712 lbs_wrbbp_write 3 30712 NULL
106961 +ocfs2_find_cpos_for_left_leaf_30713 ocfs2_find_cpos_for_left_leaf 0 30713 NULL
106962 +__mutex_lock_common_30717 __mutex_lock_common 0 30717 NULL
106963 +lbs_debugfs_read_30721 lbs_debugfs_read 3 30721 NULL
106964 +snd_nm256_playback_silence_30727 snd_nm256_playback_silence 4-3 30727 NULL
106965 +snapshot_status_30744 snapshot_status 5 30744 NULL
106966 +fuse_conn_limit_write_30777 fuse_conn_limit_write 3 30777 NULL
106967 +SyS_pread64_30778 SyS_pread64 3 30778 NULL
106968 +btrfs_run_ordered_operations_30793 btrfs_run_ordered_operations 0 30793 NULL
106969 +smk_read_doi_30813 smk_read_doi 3 30813 NULL
106970 +SYSC_splice_30820 SYSC_splice 5 30820 NULL
106971 +xlog_grant_head_wait_30829 xlog_grant_head_wait 4 30829 NULL
106972 +get_kobj_path_length_30831 get_kobj_path_length 0 30831 NULL
106973 +sctp_setsockopt_auth_chunk_30843 sctp_setsockopt_auth_chunk 3 30843 NULL
106974 +cfg80211_rx_mgmt_30844 cfg80211_rx_mgmt 5 30844 NULL
106975 +wd_autoreset_write_30862 wd_autoreset_write 3 30862 NULL
106976 +ieee80211_if_fmt_dropped_frames_no_route_30884 ieee80211_if_fmt_dropped_frames_no_route 3 30884 NULL
106977 +pn_recvmsg_30887 pn_recvmsg 4 30887 NULL
106978 +sctp_setsockopt_rtoinfo_30941 sctp_setsockopt_rtoinfo 3 30941 NULL
106979 +find_free_dev_extent_30963 find_free_dev_extent 0 30963 NULL
106980 +tty_insert_flip_string_flags_30969 tty_insert_flip_string_flags 4 30969 NULL
106981 +huge_page_mask_30981 huge_page_mask 0 30981 NULL
106982 +read_file_bt_ant_diversity_30983 read_file_bt_ant_diversity 3 30983 NULL
106983 +lbs_host_sleep_read_31013 lbs_host_sleep_read 3 31013 NULL
106984 +do_setup_msi_irqs_31043 do_setup_msi_irqs 2 31043 NULL
106985 +stride_pg_count_31053 stride_pg_count 0-2-1-4-3-5 31053 NULL
106986 +lbs_failcount_read_31063 lbs_failcount_read 3 31063 NULL
106987 +find_next_bit_le_31064 find_next_bit_le 0-2-3 31064 NULL
106988 +sctp_setsockopt_context_31091 sctp_setsockopt_context 3 31091 NULL
106989 +proc_gid_map_write_31093 proc_gid_map_write 3 31093 NULL
106990 +compat_sys_get_mempolicy_31109 compat_sys_get_mempolicy 3 31109 NULL
106991 +si_calculate_power_efficiency_ratio_31111 si_calculate_power_efficiency_ratio 3-2 31111 NULL
106992 +depth_read_31112 depth_read 3 31112 NULL
106993 +kimage_normal_alloc_31140 kimage_normal_alloc 3 31140 NULL
106994 +size_inside_page_31141 size_inside_page 0-1-2 31141 NULL
106995 +w9966_v4l_read_31148 w9966_v4l_read 3 31148 NULL
106996 +ch_do_scsi_31171 ch_do_scsi 4 31171 NULL
106997 +r592_read_fifo_pio_31198 r592_read_fifo_pio 3 31198 NULL
106998 +mtdchar_readoob_31200 mtdchar_readoob 4 31200 NULL
106999 +__btrfs_free_reserved_extent_31207 __btrfs_free_reserved_extent 2 31207 NULL
107000 +kvm_mmu_page_fault_31213 kvm_mmu_page_fault 2 31213 NULL
107001 +cpumask_weight_31215 cpumask_weight 0 31215 NULL
107002 +__read_reg_31216 __read_reg 0 31216 NULL
107003 +atm_get_addr_31221 atm_get_addr 3 31221 NULL
107004 +ulist_rbtree_insert_31235 ulist_rbtree_insert 0 31235 NULL
107005 +tcp_recvmsg_31238 tcp_recvmsg 4 31238 NULL
107006 +cyy_readb_31240 cyy_readb 0 31240 NULL
107007 +_create_sg_bios_31244 _create_sg_bios 4 31244 NULL
107008 +ieee80211_if_read_last_beacon_31257 ieee80211_if_read_last_beacon 3 31257 NULL
107009 +sctp_tsnmap_find_gap_ack_31272 sctp_tsnmap_find_gap_ack 3-2 31272 NULL
107010 +uvc_simplify_fraction_31303 uvc_simplify_fraction 3 31303 NULL
107011 +push_leaf_left_31306 push_leaf_left 0 31306 NULL
107012 +sisusbcon_scroll_31315 sisusbcon_scroll 5-2-3 31315 NULL
107013 +command_file_write_31318 command_file_write 3 31318 NULL
107014 +ext4_xattr_block_list_31325 ext4_xattr_block_list 3 31325 NULL
107015 +hwerr_crcbits_31334 hwerr_crcbits 4 31334 NULL
107016 +radix_tree_insert_31336 radix_tree_insert 0 31336 NULL
107017 +em28xx_init_usb_xfer_31337 em28xx_init_usb_xfer 4-6 31337 NULL
107018 +__cpu_to_node_31345 __cpu_to_node 0 31345 NULL
107019 +outlen_write_31358 outlen_write 3 31358 NULL
107020 +ieee80211_rx_mgmt_auth_31366 ieee80211_rx_mgmt_auth 3 31366 NULL
107021 +xprt_rdma_allocate_31372 xprt_rdma_allocate 2 31372 NULL
107022 +vb2_vmalloc_get_userptr_31374 vb2_vmalloc_get_userptr 2-3 31374 NULL
107023 +trace_parser_get_init_31379 trace_parser_get_init 2 31379 NULL
107024 +inb_31388 inb 0 31388 NULL
107025 +key_ifindex_read_31411 key_ifindex_read 3 31411 NULL
107026 +i915_gem_object_put_fence_31413 i915_gem_object_put_fence 0 31413 NULL
107027 +_sp2d_max_pg_31422 _sp2d_max_pg 0 31422 NULL
107028 +TSS_checkhmac1_31429 TSS_checkhmac1 5 31429 NULL
107029 +snd_aw2_saa7146_get_hw_ptr_capture_31431 snd_aw2_saa7146_get_hw_ptr_capture 0 31431 NULL
107030 +acpi_sci_ioapic_setup_31445 acpi_sci_ioapic_setup 4 31445 NULL
107031 +transport_alloc_session_tags_31449 transport_alloc_session_tags 2-3 31449 NULL
107032 +opera1_xilinx_rw_31453 opera1_xilinx_rw 5 31453 NULL
107033 +register_ftrace_graph_31456 register_ftrace_graph 0 31456 NULL
107034 +input_get_new_minor_31464 input_get_new_minor 1 31464 NULL
107035 +do_fcntl_31468 do_fcntl 3 31468 NULL
107036 +xfs_btree_get_numrecs_31477 xfs_btree_get_numrecs 0 31477 NULL
107037 +__ext4_journal_get_write_access_31482 __ext4_journal_get_write_access 0 31482 NULL
107038 +alg_setkey_31485 alg_setkey 3 31485 NULL
107039 +rds_message_map_pages_31487 rds_message_map_pages 2 31487 NULL
107040 +qsfp_2_read_31491 qsfp_2_read 3 31491 NULL
107041 +__alloc_bootmem_31498 __alloc_bootmem 1 31498 NULL
107042 +nouveau_mc_create__31513 nouveau_mc_create_ 5 31513 NULL
107043 +hidraw_write_31536 hidraw_write 3 31536 NULL
107044 +mtd_div_by_eb_31543 mtd_div_by_eb 0-1 31543 NULL
107045 +usbvision_read_31555 usbvision_read 3 31555 NULL
107046 +tx_frag_tkip_called_read_31575 tx_frag_tkip_called_read 3 31575 NULL
107047 +get_max_inline_xattr_value_size_31578 get_max_inline_xattr_value_size 0 31578 NULL
107048 +osst_write_31581 osst_write 3 31581 NULL
107049 +snd_compr_get_avail_31584 snd_compr_get_avail 0 31584 NULL
107050 +iwl_dbgfs_ucode_tx_stats_read_31611 iwl_dbgfs_ucode_tx_stats_read 3 31611 NULL
107051 +mtd_get_user_prot_info_31616 mtd_get_user_prot_info 0 31616 NULL
107052 +arvo_sysfs_read_31617 arvo_sysfs_read 6 31617 NULL
107053 +videobuf_read_one_31637 videobuf_read_one 3 31637 NULL
107054 +pod_alloc_sysex_buffer_31651 pod_alloc_sysex_buffer 3 31651 NULL
107055 +xfer_secondary_pool_31661 xfer_secondary_pool 2 31661 NULL
107056 +emulator_set_cr_31665 emulator_set_cr 3 31665 NULL
107057 +__lgread_31668 __lgread 4 31668 NULL
107058 +forced_ps_read_31685 forced_ps_read 3 31685 NULL
107059 +fst_recover_rx_error_31687 fst_recover_rx_error 3 31687 NULL
107060 +reiserfs_in_journal_31689 reiserfs_in_journal 3 31689 NULL
107061 +gfn_to_hva_read_31728 gfn_to_hva_read 2 31728 NULL
107062 +utf16s_to_utf8s_31735 utf16s_to_utf8s 0 31735 NULL nohasharray
107063 +lu_buf_check_and_grow_31735 lu_buf_check_and_grow 2 31735 &utf16s_to_utf8s_31735
107064 +shmem_pwrite_slow_31741 shmem_pwrite_slow 3-2 31741 NULL
107065 +NCR_700_change_queue_depth_31742 NCR_700_change_queue_depth 2 31742 NULL nohasharray
107066 +input_abs_get_max_31742 input_abs_get_max 0 31742 &NCR_700_change_queue_depth_31742
107067 +muldiv64_31743 muldiv64 3-2-0 31743 NULL
107068 +stmpe_set_altfunc_31750 stmpe_set_altfunc 2 31750 NULL nohasharray
107069 +bcm_char_read_31750 bcm_char_read 3 31750 &stmpe_set_altfunc_31750
107070 +snd_seq_device_new_31753 snd_seq_device_new 4 31753 NULL
107071 +SyS_lsetxattr_31766 SyS_lsetxattr 4 31766 NULL
107072 +usblp_cache_device_id_string_31790 usblp_cache_device_id_string 0 31790 NULL
107073 +ecryptfs_send_message_locked_31801 ecryptfs_send_message_locked 2 31801 NULL
107074 +isr_rx_procs_read_31804 isr_rx_procs_read 3 31804 NULL
107075 +data_write_31805 data_write 3 31805 NULL
107076 +SyS_msgsnd_31814 SyS_msgsnd 3 31814 NULL
107077 +strnlen_user_31815 strnlen_user 0-2 31815 NULL
107078 +sta_last_signal_read_31818 sta_last_signal_read 3 31818 NULL
107079 +fsnotify_perm_31843 fsnotify_perm 0 31843 NULL
107080 +SyS_ppoll_31855 SyS_ppoll 2 31855 NULL
107081 +iwl_dbgfs_disable_ht40_write_31876 iwl_dbgfs_disable_ht40_write 3 31876 NULL
107082 +drm_mode_crtc_set_gamma_size_31881 drm_mode_crtc_set_gamma_size 2 31881 NULL
107083 +ddb_output_write_31902 ddb_output_write 3-0 31902 NULL
107084 +xattr_permission_31907 xattr_permission 0 31907 NULL
107085 +lu_buf_realloc_31915 lu_buf_realloc 2 31915 NULL
107086 +new_dir_31919 new_dir 3 31919 NULL
107087 +kmem_alloc_31920 kmem_alloc 1 31920 NULL
107088 +guestwidth_to_adjustwidth_31937 guestwidth_to_adjustwidth 0-1 31937 NULL
107089 +process_recorded_refs_if_needed_31938 process_recorded_refs_if_needed 0 31938 NULL
107090 +SYSC_sethostname_31940 SYSC_sethostname 2 31940 NULL
107091 +iov_iter_copy_from_user_31942 iov_iter_copy_from_user 4-0 31942 NULL nohasharray
107092 +read_mem_31942 read_mem 3 31942 &iov_iter_copy_from_user_31942
107093 +vb2_write_31948 vb2_write 3 31948 NULL
107094 +pvr2_ctrl_get_valname_31951 pvr2_ctrl_get_valname 4 31951 NULL
107095 +regcache_rbtree_sync_31964 regcache_rbtree_sync 2 31964 NULL
107096 +copy_from_user_toio_31966 copy_from_user_toio 3 31966 NULL
107097 +mtd_add_partition_31971 mtd_add_partition 3 31971 NULL
107098 +btrfs_insert_empty_inode_31975 btrfs_insert_empty_inode 0 31975 NULL
107099 +vx_read_status_31982 vx_read_status 0 31982 NULL nohasharray
107100 +iblock_execute_rw_31982 iblock_execute_rw 3 31982 &vx_read_status_31982
107101 +find_next_zero_bit_31990 find_next_zero_bit 3-2-0 31990 NULL
107102 +default_setup_hpet_msi_31991 default_setup_hpet_msi 1 31991 NULL
107103 +lustre_acl_xattr_merge2posix_31992 lustre_acl_xattr_merge2posix 2 31992 NULL
107104 +sysfs_create_file_31996 sysfs_create_file 0 31996 NULL
107105 +tps6586x_irq_map_32002 tps6586x_irq_map 2 32002 NULL
107106 +calc_hmac_32010 calc_hmac 3 32010 NULL
107107 +vmcs_read64_32012 vmcs_read64 0 32012 NULL
107108 +resource_alignment_32020 resource_alignment 0 32020 NULL
107109 +aead_len_32021 aead_len 0 32021 NULL
107110 +ocfs2_remove_extent_32032 ocfs2_remove_extent 4-3-0 32032 NULL
107111 +posix_acl_set_32037 posix_acl_set 4 32037 NULL
107112 +stk_read_32038 stk_read 3 32038 NULL
107113 +vmw_cursor_update_dmabuf_32045 vmw_cursor_update_dmabuf 3-4 32045 NULL
107114 +ocfs2_update_edge_lengths_32046 ocfs2_update_edge_lengths 0 32046 NULL
107115 +SYSC_llistxattr_32061 SYSC_llistxattr 3 32061 NULL
107116 +proc_scsi_devinfo_write_32064 proc_scsi_devinfo_write 3 32064 NULL
107117 +cow_file_range_inline_32091 cow_file_range_inline 3 32091 NULL
107118 +bio_alloc_32095 bio_alloc 2 32095 NULL
107119 +alloc_pwms_32100 alloc_pwms 1-2 32100 NULL
107120 +ath6kl_fwlog_read_32101 ath6kl_fwlog_read 3 32101 NULL
107121 +disk_status_32120 disk_status 4 32120 NULL
107122 +kobject_add_internal_32133 kobject_add_internal 0 32133 NULL
107123 +btrfs_split_item_32164 btrfs_split_item 0 32164 NULL
107124 +venus_link_32165 venus_link 5 32165 NULL
107125 +do_writepages_32173 do_writepages 0 32173 NULL nohasharray
107126 +ntfs_rl_realloc_nofail_32173 ntfs_rl_realloc_nofail 3 32173 &do_writepages_32173
107127 +load_header_32183 load_header 0 32183 NULL
107128 +ubi_wl_scrub_peb_32196 ubi_wl_scrub_peb 0 32196 NULL
107129 +wusb_ccm_mac_32199 wusb_ccm_mac 7 32199 NULL
107130 +__mem_cgroup_try_charge_swapin_32204 __mem_cgroup_try_charge_swapin 0 32204 NULL
107131 +riva_get_cmap_len_32218 riva_get_cmap_len 0 32218 NULL
107132 +caif_seqpkt_recvmsg_32241 caif_seqpkt_recvmsg 4 32241 NULL
107133 +lbs_lowrssi_read_32242 lbs_lowrssi_read 3 32242 NULL
107134 +ocfs2_xattr_find_entry_32260 ocfs2_xattr_find_entry 0 32260 NULL
107135 +vmalloc_user_32308 vmalloc_user 1 32308 NULL
107136 +kvm_set_spte_hva_32312 kvm_set_spte_hva 2 32312 NULL
107137 +cas_calc_tabort_32316 cas_calc_tabort 0 32316 NULL
107138 +SyS_select_32319 SyS_select 1 32319 NULL
107139 +nouveau_bar_create__32332 nouveau_bar_create_ 4 32332 NULL
107140 +nl80211_send_mlme_event_32337 nl80211_send_mlme_event 4 32337 NULL
107141 +apic_get_tmcct_32338 apic_get_tmcct 0 32338 NULL
107142 +t4_alloc_mem_32342 t4_alloc_mem 1 32342 NULL
107143 +dispatch_ioctl_32357 dispatch_ioctl 2 32357 NULL nohasharray
107144 +rx_streaming_always_write_32357 rx_streaming_always_write 3 32357 &dispatch_ioctl_32357
107145 +sel_read_initcon_32362 sel_read_initcon 3 32362 NULL nohasharray
107146 +ReadHDLCPCI_32362 ReadHDLCPCI 0 32362 &sel_read_initcon_32362
107147 +__wait_seqno_32370 __wait_seqno 0 32370 NULL
107148 +_drbd_bm_find_next_32372 _drbd_bm_find_next 2 32372 NULL
107149 +cpuset_slab_spread_node_32376 cpuset_slab_spread_node 0 32376 NULL
107150 +intel_iommu_map_32384 intel_iommu_map 4-3 32384 NULL
107151 +local_clock_32385 local_clock 0 32385 NULL
107152 +ocfs2_cancel_convert_32392 ocfs2_cancel_convert 0 32392 NULL
107153 +ll_setxattr_common_32398 ll_setxattr_common 4 32398 NULL
107154 +xfs_iext_add_indirect_multi_32400 xfs_iext_add_indirect_multi 3 32400 NULL
107155 +__add_missing_keys_32402 __add_missing_keys 0 32402 NULL
107156 +vmci_qp_alloc_32405 vmci_qp_alloc 5-3 32405 NULL
107157 +regmap_irq_map_32429 regmap_irq_map 2 32429 NULL
107158 +break_ksm_32439 break_ksm 0 32439 NULL
107159 +__ext4_handle_dirty_super_32458 __ext4_handle_dirty_super 0 32458 NULL
107160 +snd_pcm_sync_ptr_32461 snd_pcm_sync_ptr 0 32461 NULL
107161 +cache_status_32462 cache_status 5 32462 NULL
107162 +fill_readbuf_32464 fill_readbuf 3 32464 NULL
107163 +dgap_usertoboard_32490 dgap_usertoboard 4 32490 NULL
107164 +ide_driver_proc_write_32493 ide_driver_proc_write 3 32493 NULL
107165 +bypass_pwoff_write_32499 bypass_pwoff_write 3 32499 NULL
107166 +mdc_pinger_recov_seq_write_32510 mdc_pinger_recov_seq_write 3 32510 NULL
107167 +ctrl_std_val_to_sym_32516 ctrl_std_val_to_sym 5 32516 NULL
107168 +ocfs2_local_alloc_reserve_for_window_32518 ocfs2_local_alloc_reserve_for_window 0 32518 NULL
107169 +disconnect_32521 disconnect 4 32521 NULL
107170 +qsfp_read_32522 qsfp_read 0-2-4 32522 NULL
107171 +audio_get_intf_req_32524 audio_get_intf_req 0 32524 NULL nohasharray
107172 +ocfs2_refresh_qinfo_32524 ocfs2_refresh_qinfo 0 32524 &audio_get_intf_req_32524
107173 +ilo_read_32531 ilo_read 3 32531 NULL
107174 +ieee80211_if_read_estab_plinks_32533 ieee80211_if_read_estab_plinks 3 32533 NULL
107175 +format_devstat_counter_32550 format_devstat_counter 3 32550 NULL
107176 +__first_node_32558 __first_node 0 32558 NULL
107177 +aes_encrypt_fail_read_32562 aes_encrypt_fail_read 3 32562 NULL
107178 +osc_iocontrol_32565 osc_iocontrol 3 32565 NULL
107179 +mem_swapout_entry_32586 mem_swapout_entry 3 32586 NULL
107180 +pipeline_tcp_tx_stat_fifo_int_read_32589 pipeline_tcp_tx_stat_fifo_int_read 3 32589 NULL
107181 +read_file_beacon_32595 read_file_beacon 3 32595 NULL
107182 +ieee80211_if_read_dropped_frames_congestion_32603 ieee80211_if_read_dropped_frames_congestion 3 32603 NULL
107183 +irda_recvmsg_dgram_32631 irda_recvmsg_dgram 4 32631 NULL
107184 +cfg80211_roamed_32632 cfg80211_roamed 5-7 32632 NULL
107185 +ite_decode_bytes_32642 ite_decode_bytes 3 32642 NULL
107186 +kvmalloc_32646 kvmalloc 1 32646 NULL
107187 +ib_sg_dma_len_32649 ib_sg_dma_len 0 32649 NULL
107188 +generic_readlink_32654 generic_readlink 3 32654 NULL nohasharray
107189 +ftrace_startup_32654 ftrace_startup 0 32654 &generic_readlink_32654
107190 +get_unaligned_be24_32667 get_unaligned_be24 0 32667 NULL
107191 +move_addr_to_kernel_32673 move_addr_to_kernel 2 32673 NULL
107192 +apei_res_add_32674 apei_res_add 0 32674 NULL
107193 +jfs_readpages_32702 jfs_readpages 4 32702 NULL
107194 +rt2x00debug_read_queue_dump_32712 rt2x00debug_read_queue_dump 3 32712 NULL
107195 +i40e_pci_sriov_enable_32742 i40e_pci_sriov_enable 2 32742 NULL
107196 +get_arg_page_32746 get_arg_page 2 32746 NULL
107197 +megasas_change_queue_depth_32747 megasas_change_queue_depth 2 32747 NULL
107198 +stats_read_ul_32751 stats_read_ul 3 32751 NULL
107199 +begin_cmd_32766 begin_cmd 0 32766 NULL
107200 +vmci_transport_dgram_dequeue_32775 vmci_transport_dgram_dequeue 4 32775 NULL
107201 +sctp_tsnmap_grow_32784 sctp_tsnmap_grow 2 32784 NULL
107202 +ocfs2_read_inode_block_full_32790 ocfs2_read_inode_block_full 0 32790 NULL
107203 +rproc_name_read_32805 rproc_name_read 3 32805 NULL
107204 +new_tape_buffer_32866 new_tape_buffer 2 32866 NULL
107205 +io_apic_setup_irq_pin_32868 io_apic_setup_irq_pin 1-2 32868 NULL
107206 +ath6kl_usb_submit_ctrl_in_32880 ath6kl_usb_submit_ctrl_in 6 32880 NULL nohasharray
107207 +cifs_writedata_alloc_32880 cifs_writedata_alloc 1 32880 &ath6kl_usb_submit_ctrl_in_32880
107208 +vp702x_usb_inout_cmd_32884 vp702x_usb_inout_cmd 6-4 32884 NULL
107209 +ext4_get_group_number_32899 ext4_get_group_number 0-2 32899 NULL
107210 +il_dbgfs_tx_stats_read_32913 il_dbgfs_tx_stats_read 3 32913 NULL
107211 +zlib_inflate_workspacesize_32927 zlib_inflate_workspacesize 0 32927 NULL
107212 +rmap_recycle_32938 rmap_recycle 3 32938 NULL
107213 +irq_reserve_irqs_32946 irq_reserve_irqs 1-2 32946 NULL
107214 +ext4_valid_block_bitmap_32958 ext4_valid_block_bitmap 3 32958 NULL
107215 +xfs_log_reserve_32959 xfs_log_reserve 2 32959 NULL
107216 +ext4_group_overhead_blocks_32971 ext4_group_overhead_blocks 0 32971 NULL
107217 +arch_ptrace_32981 arch_ptrace 3 32981 NULL
107218 +compat_filldir_32999 compat_filldir 3 32999 NULL
107219 +btrfs_lookup_extent_info_33000 btrfs_lookup_extent_info 0 33000 NULL
107220 +ext3_alloc_blocks_33007 ext3_alloc_blocks 3-0 33007 NULL nohasharray
107221 +SyS_syslog_33007 SyS_syslog 3 33007 &ext3_alloc_blocks_33007
107222 +br_multicast_set_hash_max_33012 br_multicast_set_hash_max 2 33012 NULL
107223 +write_file_bt_ant_diversity_33019 write_file_bt_ant_diversity 3 33019 NULL
107224 +snd_pcm_prepare_33036 snd_pcm_prepare 0 33036 NULL
107225 +SYSC_lgetxattr_33049 SYSC_lgetxattr 4 33049 NULL
107226 +pipeline_dec_packet_in_fifo_full_read_33052 pipeline_dec_packet_in_fifo_full_read 3 33052 NULL
107227 +ebt_compat_match_offset_33053 ebt_compat_match_offset 0-2 33053 NULL
107228 +bitmap_resize_33054 bitmap_resize 2-3 33054 NULL
107229 +intel_ring_flush_all_caches_33059 intel_ring_flush_all_caches 0 33059 NULL
107230 +stats_dot11RTSSuccessCount_read_33065 stats_dot11RTSSuccessCount_read 3 33065 NULL
107231 +sel_read_checkreqprot_33068 sel_read_checkreqprot 3 33068 NULL
107232 +alloc_tio_33077 alloc_tio 3 33077 NULL
107233 +acl_permission_check_33083 acl_permission_check 0 33083 NULL
107234 +write_node_33121 write_node 4 33121 NULL
107235 +fb_sys_write_33130 fb_sys_write 3 33130 NULL
107236 +__len_within_target_33132 __len_within_target 0 33132 NULL
107237 +SyS_poll_33152 SyS_poll 2 33152 NULL
107238 +debug_debug6_read_33168 debug_debug6_read 3 33168 NULL
107239 +dataflash_read_fact_otp_33204 dataflash_read_fact_otp 3-2 33204 NULL
107240 +pp_read_33210 pp_read 3 33210 NULL
107241 +xfs_file_aio_write_33234 xfs_file_aio_write 4 33234 NULL nohasharray
107242 +i915_gem_object_wait_rendering__tail_33234 i915_gem_object_wait_rendering__tail 0 33234 &xfs_file_aio_write_33234
107243 +__vb2_wait_for_done_vb_33246 __vb2_wait_for_done_vb 0 33246 NULL
107244 +snd_pcm_plug_client_size_33267 snd_pcm_plug_client_size 0-2 33267 NULL
107245 +cachefiles_cook_key_33274 cachefiles_cook_key 2 33274 NULL
107246 +sync_pt_create_33282 sync_pt_create 2 33282 NULL
107247 +btrfs_delete_one_dir_name_33303 btrfs_delete_one_dir_name 0 33303 NULL
107248 +mcs7830_get_reg_33308 mcs7830_get_reg 3 33308 NULL
107249 +isku_sysfs_read_keys_easyzone_33318 isku_sysfs_read_keys_easyzone 6 33318 NULL
107250 +vx_send_irq_dsp_33329 vx_send_irq_dsp 0 33329 NULL
107251 +joydev_ioctl_33343 joydev_ioctl 2 33343 NULL
107252 +lov_stripesize_seq_write_33353 lov_stripesize_seq_write 3 33353 NULL
107253 +create_xattr_datum_33356 create_xattr_datum 5 33356 NULL nohasharray
107254 +irq_pkt_threshold_read_33356 irq_pkt_threshold_read 3 33356 &create_xattr_datum_33356
107255 +read_file_regidx_33370 read_file_regidx 3 33370 NULL
107256 +ocfs2_quota_read_33382 ocfs2_quota_read 5 33382 NULL
107257 +ieee80211_if_read_dropped_frames_no_route_33383 ieee80211_if_read_dropped_frames_no_route 3 33383 NULL
107258 +scsi_varlen_cdb_length_33385 scsi_varlen_cdb_length 0 33385 NULL
107259 +tg_get_cfs_period_33390 tg_get_cfs_period 0 33390 NULL
107260 +ocfs2_allocate_unwritten_extents_33394 ocfs2_allocate_unwritten_extents 3-2 33394 NULL
107261 +cfs_trace_copyin_string_33396 cfs_trace_copyin_string 4 33396 NULL
107262 +ext4_meta_bg_first_block_no_33408 ext4_meta_bg_first_block_no 2 33408 NULL nohasharray
107263 +snd_pcm_capture_ioctl1_33408 snd_pcm_capture_ioctl1 0 33408 &ext4_meta_bg_first_block_no_33408
107264 +ufs_getfrag_block_33409 ufs_getfrag_block 2 33409 NULL
107265 +filemap_fdatawrite_33415 filemap_fdatawrite 0 33415 NULL
107266 +dis_tap_write_33426 dis_tap_write 3 33426 NULL
107267 +ubh_scanc_33436 ubh_scanc 0-3-4 33436 NULL
107268 +message_stats_list_33440 message_stats_list 5 33440 NULL
107269 +ovs_vport_alloc_33475 ovs_vport_alloc 1 33475 NULL
107270 +create_entry_33479 create_entry 2 33479 NULL
107271 +ip_setsockopt_33487 ip_setsockopt 5 33487 NULL nohasharray
107272 +elf_map_33487 elf_map 0-2 33487 &ip_setsockopt_33487
107273 +ol_dqblk_chunk_off_33489 ol_dqblk_chunk_off 2 33489 NULL
107274 +res_counter_read_33499 res_counter_read 4 33499 NULL
107275 +fb_read_33506 fb_read 3 33506 NULL
107276 +musb_test_mode_write_33518 musb_test_mode_write 3 33518 NULL
107277 +ahash_setkey_unaligned_33521 ahash_setkey_unaligned 3 33521 NULL
107278 +nes_alloc_fast_reg_page_list_33523 nes_alloc_fast_reg_page_list 2 33523 NULL
107279 +aggr_size_rx_size_read_33526 aggr_size_rx_size_read 3 33526 NULL
107280 +acpi_gsi_to_irq_33533 acpi_gsi_to_irq 1 33533 NULL
107281 +osc_max_rpcs_in_flight_seq_write_33539 osc_max_rpcs_in_flight_seq_write 3 33539 NULL nohasharray
107282 +tomoyo_read_self_33539 tomoyo_read_self 3 33539 &osc_max_rpcs_in_flight_seq_write_33539
107283 +read_symlink_33572 read_symlink 0 33572 NULL
107284 +count_subheaders_33591 count_subheaders 0 33591 NULL
107285 +scsi_execute_33596 scsi_execute 5 33596 NULL
107286 +comedi_buf_write_n_allocated_33604 comedi_buf_write_n_allocated 0 33604 NULL
107287 +xt_compat_target_offset_33608 xt_compat_target_offset 0 33608 NULL
107288 +usb_gstrings_attach_33615 usb_gstrings_attach 3 33615 NULL nohasharray
107289 +il_dbgfs_qos_read_33615 il_dbgfs_qos_read 3 33615 &usb_gstrings_attach_33615
107290 +do_sync_write_33624 do_sync_write 0 33624 NULL
107291 +stride_page_count_33641 stride_page_count 2 33641 NULL
107292 +slab_alloc_node_33663 slab_alloc_node 3 33663 NULL
107293 +irq_blk_threshold_read_33666 irq_blk_threshold_read 3 33666 NULL
107294 +inw_p_33668 inw_p 0 33668 NULL
107295 +arp_hdr_len_33671 arp_hdr_len 0 33671 NULL
107296 +i2c_hid_alloc_buffers_33673 i2c_hid_alloc_buffers 2 33673 NULL
107297 +nv50_disp_dmac_create__33696 nv50_disp_dmac_create_ 6 33696 NULL
107298 +netlink_sendmsg_33708 netlink_sendmsg 4 33708 NULL
107299 +tipc_link_stats_33716 tipc_link_stats 3 33716 NULL
107300 +ext4_wb_update_i_disksize_33717 ext4_wb_update_i_disksize 2 33717 NULL
107301 +pvr2_stream_buffer_count_33719 pvr2_stream_buffer_count 2 33719 NULL
107302 +ocfs2_extent_map_get_blocks_33720 ocfs2_extent_map_get_blocks 2 33720 NULL
107303 +write_file_spectral_count_33723 write_file_spectral_count 3 33723 NULL nohasharray
107304 +ocfs2_lock_allocators_move_extents_33723 ocfs2_lock_allocators_move_extents 0 33723 &write_file_spectral_count_33723
107305 +__mutex_lock_interruptible_slowpath_33735 __mutex_lock_interruptible_slowpath 0 33735 NULL
107306 +do_munmap_33752 do_munmap 0 33752 NULL
107307 +vifs_state_read_33762 vifs_state_read 3 33762 NULL
107308 +hashtab_create_33769 hashtab_create 3 33769 NULL
107309 +btrfs_delayed_refs_qgroup_accounting_33775 btrfs_delayed_refs_qgroup_accounting 0 33775 NULL
107310 +irq_map_generic_chip_33793 irq_map_generic_chip 2 33793 NULL
107311 +if_sdio_read_rx_len_33800 if_sdio_read_rx_len 0 33800 NULL
107312 +find_next_offset_33804 find_next_offset 3-0 33804 NULL
107313 +filter_write_33819 filter_write 3 33819 NULL
107314 +sep_create_msgarea_context_33829 sep_create_msgarea_context 4 33829 NULL
107315 +scrub_setup_recheck_block_33831 scrub_setup_recheck_block 5-4 33831 NULL
107316 +ext4_journal_extend_33835 ext4_journal_extend 2-0 33835 NULL
107317 +snd_pcm_action_nonatomic_33844 snd_pcm_action_nonatomic 0 33844 NULL
107318 +calgary_alloc_coherent_33851 calgary_alloc_coherent 2 33851 NULL
107319 +oz_cdev_write_33852 oz_cdev_write 3 33852 NULL
107320 +bin_string_33884 bin_string 3-5 33884 NULL
107321 +get_user_pages_33908 get_user_pages 0-3-4 33908 NULL
107322 +ath6kl_roam_mode_write_33912 ath6kl_roam_mode_write 3 33912 NULL
107323 +queue_logical_block_size_33918 queue_logical_block_size 0 33918 NULL
107324 +sel_read_avc_cache_threshold_33942 sel_read_avc_cache_threshold 3 33942 NULL
107325 +lpfc_idiag_ctlacc_read_33943 lpfc_idiag_ctlacc_read 3 33943 NULL
107326 +read_file_tgt_rx_stats_33944 read_file_tgt_rx_stats 3 33944 NULL nohasharray
107327 +btrfs_uuid_tree_rem_33944 btrfs_uuid_tree_rem 0 33944 &read_file_tgt_rx_stats_33944
107328 +hfsplus_osx_setxattr_33952 hfsplus_osx_setxattr 4 33952 NULL
107329 +__proc_dump_kernel_33954 __proc_dump_kernel 5 33954 NULL
107330 +ocfs2_create_new_meta_bhs_33955 ocfs2_create_new_meta_bhs 0 33955 NULL
107331 +vga_switcheroo_debugfs_write_33984 vga_switcheroo_debugfs_write 3 33984 NULL
107332 +snd_interval_refine_33987 snd_interval_refine 0 33987 NULL
107333 +__ntfs_malloc_34022 __ntfs_malloc 1 34022 NULL
107334 +lbs_lowrssi_write_34025 lbs_lowrssi_write 3 34025 NULL
107335 +ppp_write_34034 ppp_write 3 34034 NULL
107336 +tty_insert_flip_string_34042 tty_insert_flip_string 3-0 34042 NULL
107337 +__domain_flush_pages_34045 __domain_flush_pages 2-3 34045 NULL
107338 +is_trap_at_addr_34047 is_trap_at_addr 2 34047 NULL
107339 +memcg_update_all_caches_34068 memcg_update_all_caches 1 34068 NULL
107340 +pipeline_pipeline_fifo_full_read_34095 pipeline_pipeline_fifo_full_read 3 34095 NULL
107341 +__irq_domain_add_34101 __irq_domain_add 2 34101 NULL
107342 +i915_gem_object_set_cache_level_34107 i915_gem_object_set_cache_level 0 34107 NULL nohasharray
107343 +proc_scsi_host_write_34107 proc_scsi_host_write 3 34107 &i915_gem_object_set_cache_level_34107
107344 +is_discarded_oblock_34120 is_discarded_oblock 2 34120 NULL
107345 +islpci_mgt_transmit_34133 islpci_mgt_transmit 5 34133 NULL
107346 +ttm_dma_page_pool_free_34135 ttm_dma_page_pool_free 2-0 34135 NULL
107347 +ixgbe_dbg_netdev_ops_write_34141 ixgbe_dbg_netdev_ops_write 3 34141 NULL
107348 +shmem_pread_fast_34147 shmem_pread_fast 3 34147 NULL
107349 +ext4_xattr_list_34162 ext4_xattr_list 3 34162 NULL
107350 +ocfs2_xattr_list_entry_34165 ocfs2_xattr_list_entry 0 34165 NULL
107351 +skb_to_sgvec_34171 skb_to_sgvec 0 34171 NULL
107352 +dtf_write_run_34196 dtf_write_run 3 34196 NULL
107353 +ext4_da_write_begin_34215 ext4_da_write_begin 3-4 34215 NULL
107354 +i915_gem_execbuffer_reserve_object_34224 i915_gem_execbuffer_reserve_object 0 34224 NULL
107355 +setup_nodes_for_search_34248 setup_nodes_for_search 0 34248 NULL
107356 +numa_migrate_prep_34256 numa_migrate_prep 0 34256 NULL
107357 +btrfs_bitmap_cluster_34257 btrfs_bitmap_cluster 4 34257 NULL
107358 +bl_pipe_downcall_34264 bl_pipe_downcall 3 34264 NULL
107359 +ocfs2_dlm_lock_34265 ocfs2_dlm_lock 0 34265 NULL
107360 +pcf857x_to_irq_34273 pcf857x_to_irq 2 34273 NULL
107361 +device_private_init_34279 device_private_init 0 34279 NULL
107362 +get_task_io_context_34316 get_task_io_context 3 34316 NULL
107363 +ext4_get_groups_count_34324 ext4_get_groups_count 0 34324 NULL
107364 +iov_iter_single_seg_count_34326 iov_iter_single_seg_count 0 34326 NULL nohasharray
107365 +pcpu_need_to_extend_34326 pcpu_need_to_extend 0 34326 &iov_iter_single_seg_count_34326
107366 +SYSC_brk_34331 SYSC_brk 1 34331 NULL
107367 +__insert_34349 __insert 2-3 34349 NULL
107368 +sync_page_io_34363 sync_page_io 3 34363 NULL nohasharray
107369 +crypto_ablkcipher_ivsize_34363 crypto_ablkcipher_ivsize 0 34363 &sync_page_io_34363
107370 +rngapi_reset_34366 rngapi_reset 3 34366 NULL
107371 +reiserfs_resize_34377 reiserfs_resize 2 34377 NULL
107372 +ea_read_34378 ea_read 0 34378 NULL
107373 +fuse_send_read_34379 fuse_send_read 4 34379 NULL
107374 +av7110_vbi_write_34384 av7110_vbi_write 3 34384 NULL
107375 +usbvision_v4l2_read_34386 usbvision_v4l2_read 3 34386 NULL
107376 +read_rbu_image_type_34387 read_rbu_image_type 6 34387 NULL
107377 +iwl_calib_set_34400 iwl_calib_set 3 34400 NULL nohasharray
107378 +ivtv_read_pos_34400 ivtv_read_pos 3 34400 &iwl_calib_set_34400
107379 +wd_exp_mode_write_34407 wd_exp_mode_write 3 34407 NULL
107380 +nl80211_send_disassoc_34424 nl80211_send_disassoc 4 34424 NULL
107381 +security_socket_create_34439 security_socket_create 0 34439 NULL
107382 +usbtest_alloc_urb_34446 usbtest_alloc_urb 5-3 34446 NULL
107383 +mwifiex_regrdwr_read_34472 mwifiex_regrdwr_read 3 34472 NULL
107384 +skcipher_sndbuf_34476 skcipher_sndbuf 0 34476 NULL
107385 +i2o_parm_field_get_34477 i2o_parm_field_get 5 34477 NULL
107386 +ocfs2_block_group_clear_bits_34484 ocfs2_block_group_clear_bits 0 34484 NULL
107387 +security_inode_permission_34488 security_inode_permission 0 34488 NULL
107388 +SyS_pwritev_34494 SyS_pwritev 3 34494 NULL nohasharray
107389 +__ffs64_34494 __ffs64 1-0 34494 &SyS_pwritev_34494
107390 +qp_alloc_res_34496 qp_alloc_res 5 34496 NULL
107391 +lu_buf_check_and_alloc_34505 lu_buf_check_and_alloc 2 34505 NULL
107392 +snd_pcm_hw_param_value_34525 snd_pcm_hw_param_value 0 34525 NULL
107393 +ext4_fallocate_34537 ext4_fallocate 4-3 34537 NULL nohasharray
107394 +tracing_stats_read_34537 tracing_stats_read 3 34537 &ext4_fallocate_34537
107395 +hugetlbfs_read_actor_34547 hugetlbfs_read_actor 4-5-2-0 34547 NULL
107396 +dbBackSplit_34561 dbBackSplit 0 34561 NULL
107397 +alloc_ieee80211_rsl_34564 alloc_ieee80211_rsl 1 34564 NULL nohasharray
107398 +self_check_peb_ec_hdr_34564 self_check_peb_ec_hdr 0 34564 &alloc_ieee80211_rsl_34564
107399 +i915_gem_execbuffer_relocate_object_34575 i915_gem_execbuffer_relocate_object 0 34575 NULL
107400 +lov_stripecount_seq_write_34582 lov_stripecount_seq_write 3 34582 NULL
107401 +init_send_hfcd_34586 init_send_hfcd 1 34586 NULL
107402 +inet6_ifla6_size_34591 inet6_ifla6_size 0 34591 NULL
107403 +ceph_msgpool_init_34599 ceph_msgpool_init 4 34599 NULL nohasharray
107404 +cw1200_queue_init_34599 cw1200_queue_init 4 34599 &ceph_msgpool_init_34599
107405 +__add_prelim_ref_34600 __add_prelim_ref 0 34600 NULL
107406 +brcmf_cfg80211_mgmt_tx_34608 brcmf_cfg80211_mgmt_tx 7 34608 NULL
107407 +__jffs2_ref_totlen_34609 __jffs2_ref_totlen 0 34609 NULL nohasharray
107408 +mtd_write_34609 mtd_write 0 34609 &__jffs2_ref_totlen_34609
107409 +apei_get_nvs_resources_34616 apei_get_nvs_resources 0 34616 NULL
107410 +__cfg80211_disconnected_34622 __cfg80211_disconnected 3 34622 NULL
107411 +cnic_alloc_dma_34641 cnic_alloc_dma 3 34641 NULL
107412 +tomoyo_dump_page_34649 tomoyo_dump_page 2 34649 NULL
107413 +kvm_set_spte_hva_34671 kvm_set_spte_hva 2 34671 NULL
107414 +sleep_auth_write_34676 sleep_auth_write 3 34676 NULL
107415 +isr_fiqs_read_34687 isr_fiqs_read 3 34687 NULL
107416 +port_print_34704 port_print 3 34704 NULL
107417 +alloc_irq_and_cfg_at_34706 alloc_irq_and_cfg_at 1-2 34706 NULL
107418 +ext4_listxattr_34712 ext4_listxattr 3 34712 NULL
107419 +ieee80211_if_read_num_sta_ps_34722 ieee80211_if_read_num_sta_ps 3 34722 NULL
107420 +platform_list_read_file_34734 platform_list_read_file 3 34734 NULL
107421 +reg_w_ixbuf_34736 reg_w_ixbuf 4 34736 NULL
107422 +lsm_alloc_plain_34755 lsm_alloc_plain 1 34755 NULL
107423 +device_add_34766 device_add 0 34766 NULL
107424 +qib_cdev_init_34778 qib_cdev_init 1 34778 NULL
107425 +SYSC_keyctl_34800 SYSC_keyctl 4 34800 NULL
107426 +can_nocow_extent_34801 can_nocow_extent 2 34801 NULL
107427 +drbd_get_max_capacity_34804 drbd_get_max_capacity 0 34804 NULL
107428 +ll_setxattr_34806 ll_setxattr 4 34806 NULL
107429 +file_page_index_34820 file_page_index 0-2 34820 NULL
107430 +b43_debugfs_write_34838 b43_debugfs_write 3 34838 NULL
107431 +nl_portid_hash_zalloc_34843 nl_portid_hash_zalloc 1 34843 NULL
107432 +acpi_system_write_wakeup_device_34853 acpi_system_write_wakeup_device 3 34853 NULL
107433 +usb_serial_generic_prepare_write_buffer_34857 usb_serial_generic_prepare_write_buffer 3 34857 NULL
107434 +ieee80211_if_read_txpower_34871 ieee80211_if_read_txpower 3 34871 NULL
107435 +msg_print_text_34889 msg_print_text 0 34889 NULL
107436 +ieee80211_if_write_34894 ieee80211_if_write 3 34894 NULL
107437 +si476x_radio_read_rsq_primary_blob_34916 si476x_radio_read_rsq_primary_blob 3 34916 NULL
107438 +__inode_permission_34925 __inode_permission 0 34925 NULL nohasharray
107439 +btrfs_super_chunk_root_34925 btrfs_super_chunk_root 0 34925 &__inode_permission_34925
107440 +ODM_AllocateMemory_34929 ODM_AllocateMemory 3 34929 NULL
107441 +ceph_aio_write_34930 ceph_aio_write 4 34930 NULL
107442 +sec_flags2str_34933 sec_flags2str 3 34933 NULL
107443 +snd_info_entry_read_34938 snd_info_entry_read 3 34938 NULL
107444 +i2c_transfer_34958 i2c_transfer 0 34958 NULL
107445 +do_add_page_to_bio_34974 do_add_page_to_bio 2-10 34974 NULL
107446 +mq_lookup_34990 mq_lookup 2 34990 NULL
107447 +i915_gem_gtt_prepare_object_34991 i915_gem_gtt_prepare_object 0 34991 NULL
107448 +schedule_erase_34996 schedule_erase 0 34996 NULL
107449 +rx_rx_hdr_overflow_read_35002 rx_rx_hdr_overflow_read 3 35002 NULL
107450 +l2cap_skbuff_fromiovec_35003 l2cap_skbuff_fromiovec 4-3 35003 NULL
107451 +dm_cache_insert_mapping_35005 dm_cache_insert_mapping 2-3 35005 NULL
107452 +sisusb_copy_memory_35016 sisusb_copy_memory 4 35016 NULL
107453 +snd_pcm_hw_params_35020 snd_pcm_hw_params 0 35020 NULL
107454 +process_changed_xattr_35025 process_changed_xattr 0 35025 NULL
107455 +coda_psdev_read_35029 coda_psdev_read 3 35029 NULL
107456 +brcmf_sdio_chip_writenvram_35042 brcmf_sdio_chip_writenvram 4 35042 NULL
107457 +pwr_connection_out_of_sync_read_35061 pwr_connection_out_of_sync_read 3 35061 NULL
107458 +ext4_split_unwritten_extents_35063 ext4_split_unwritten_extents 0 35063 NULL
107459 +ntfs_attr_extend_initialized_35084 ntfs_attr_extend_initialized 2 35084 NULL
107460 +store_ifalias_35088 store_ifalias 4 35088 NULL
107461 +__kfifo_uint_must_check_helper_35097 __kfifo_uint_must_check_helper 0-1 35097 NULL
107462 +capi_write_35104 capi_write 3 35104 NULL nohasharray
107463 +tx_tx_done_template_read_35104 tx_tx_done_template_read 3 35104 &capi_write_35104
107464 +ide_settings_proc_write_35110 ide_settings_proc_write 3 35110 NULL
107465 +ceph_osdc_start_request_35122 ceph_osdc_start_request 0 35122 NULL
107466 +message_stats_print_35158 message_stats_print 6 35158 NULL
107467 +iscsi_conn_setup_35159 iscsi_conn_setup 2 35159 NULL
107468 +ieee80211_if_read_bssid_35161 ieee80211_if_read_bssid 3 35161 NULL
107469 +relocate_entry_cpu_35176 relocate_entry_cpu 0 35176 NULL
107470 +solo_v4l2_init_35179 solo_v4l2_init 2 35179 NULL
107471 +SyS_init_module_35180 SyS_init_module 2 35180 NULL
107472 +mlx4_ib_get_cq_umem_35184 mlx4_ib_get_cq_umem 5-6 35184 NULL
107473 +uprobe_get_swbp_addr_35201 uprobe_get_swbp_addr 0 35201 NULL
107474 +unix_stream_recvmsg_35210 unix_stream_recvmsg 4 35210 NULL
107475 +striped_read_35218 striped_read 0-3-2 35218 NULL nohasharray
107476 +security_key_getsecurity_35218 security_key_getsecurity 0 35218 &striped_read_35218
107477 +video_register_device_no_warn_35226 video_register_device_no_warn 3 35226 NULL nohasharray
107478 +rx_rx_cmplt_task_read_35226 rx_rx_cmplt_task_read 3 35226 &video_register_device_no_warn_35226
107479 +gfn_to_page_many_atomic_35234 gfn_to_page_many_atomic 2 35234 NULL nohasharray
107480 +may_commit_transaction_35234 may_commit_transaction 0 35234 &gfn_to_page_many_atomic_35234
107481 +SYSC_madvise_35241 SYSC_madvise 1-2 35241 NULL
107482 +set_fd_set_35249 set_fd_set 1 35249 NULL
107483 +ioapic_setup_resources_35255 ioapic_setup_resources 1 35255 NULL
107484 +alloc_thread_groups_35256 alloc_thread_groups 2 35256 NULL
107485 +jbd2_journal_get_write_access_35263 jbd2_journal_get_write_access 0 35263 NULL
107486 +dis_disc_write_35265 dis_disc_write 3 35265 NULL
107487 +dma_show_regs_35266 dma_show_regs 3 35266 NULL
107488 +irda_recvmsg_stream_35280 irda_recvmsg_stream 4 35280 NULL
107489 +i2o_block_end_request_35282 i2o_block_end_request 3 35282 NULL
107490 +isr_rx_rdys_read_35283 isr_rx_rdys_read 3 35283 NULL
107491 +brcmf_sdio_forensic_read_35311 brcmf_sdio_forensic_read 3 35311 NULL nohasharray
107492 +__btrfs_buffered_write_35311 __btrfs_buffered_write 3 35311 &brcmf_sdio_forensic_read_35311
107493 +tracing_read_pipe_35312 tracing_read_pipe 3 35312 NULL
107494 +fallback_on_nodma_alloc_35332 fallback_on_nodma_alloc 2 35332 NULL
107495 +ieee80211_if_fmt_ap_power_level_35347 ieee80211_if_fmt_ap_power_level 3 35347 NULL
107496 +nouveau_devinit_create__35348 nouveau_devinit_create_ 4 35348 NULL
107497 +ieee80211_rx_mgmt_deauth_35351 ieee80211_rx_mgmt_deauth 3 35351 NULL
107498 +compat_filldir64_35354 compat_filldir64 3 35354 NULL
107499 +amd_iommu_map_35355 amd_iommu_map 4 35355 NULL
107500 +read_kmem_35372 read_kmem 3 35372 NULL
107501 +ocfs2_journal_access_di_35393 ocfs2_journal_access_di 0 35393 NULL
107502 +create_pending_snapshots_35402 create_pending_snapshots 0 35402 NULL
107503 +btrfs_search_old_slot_35406 btrfs_search_old_slot 0 35406 NULL
107504 +SyS_getxattr_35408 SyS_getxattr 4 35408 NULL
107505 +rawv6_send_hdrinc_35425 rawv6_send_hdrinc 3 35425 NULL
107506 +C_SYSC_sendfile_35432 C_SYSC_sendfile 4 35432 NULL
107507 +__set_test_and_free_35436 __set_test_and_free 2 35436 NULL
107508 +buffer_to_user_35439 buffer_to_user 3 35439 NULL
107509 +memcg_memory_allocated_read_35469 memcg_memory_allocated_read 0 35469 NULL
107510 +fiemap_prepare_and_copy_exts_35494 fiemap_prepare_and_copy_exts 5 35494 NULL
107511 +btrfs_prealloc_file_range_trans_35500 btrfs_prealloc_file_range_trans 4-0 35500 NULL
107512 +async_setkey_35521 async_setkey 3 35521 NULL
107513 +inet_mask_len_35527 inet_mask_len 1 35527 NULL
107514 +__filemap_fdatawrite_range_35528 __filemap_fdatawrite_range 0 35528 NULL
107515 +iwl_dbgfs_bt_traffic_read_35534 iwl_dbgfs_bt_traffic_read 3 35534 NULL
107516 +pstore_mkfile_35536 pstore_mkfile 7 35536 NULL
107517 +rxpipe_tx_xfr_host_int_trig_rx_data_read_35538 rxpipe_tx_xfr_host_int_trig_rx_data_read 3 35538 NULL
107518 +ibnl_put_attr_35541 ibnl_put_attr 3 35541 NULL
107519 +ieee80211_if_write_smps_35550 ieee80211_if_write_smps 3 35550 NULL
107520 +send_update_extent_35556 send_update_extent 0 35556 NULL
107521 +vb2_dqbuf_35559 vb2_dqbuf 0 35559 NULL
107522 +sysfs_create_subdir_35567 sysfs_create_subdir 0 35567 NULL
107523 +ext4_blocks_for_truncate_35579 ext4_blocks_for_truncate 0 35579 NULL
107524 +ext2_acl_from_disk_35580 ext2_acl_from_disk 2 35580 NULL
107525 +spk_msg_set_35586 spk_msg_set 3 35586 NULL
107526 +kernel_readv_35617 kernel_readv 3 35617 NULL
107527 +reiserfs_readpages_35629 reiserfs_readpages 4 35629 NULL
107528 +ptlrpcd_steal_rqset_35637 ptlrpcd_steal_rqset 0 35637 NULL
107529 +nv50_vm_create_35643 nv50_vm_create 2-3 35643 NULL
107530 +spi_register_board_info_35651 spi_register_board_info 2 35651 NULL
107531 +rdmaltWithLock_35669 rdmaltWithLock 0 35669 NULL
107532 +cpu_to_sle64_35677 cpu_to_sle64 0-1 35677 NULL
107533 +ext3_mark_iloc_dirty_35686 ext3_mark_iloc_dirty 0 35686 NULL
107534 +dm_table_create_35687 dm_table_create 3 35687 NULL
107535 +SYSC_pwritev_35690 SYSC_pwritev 3 35690 NULL
107536 +rds_page_copy_user_35691 rds_page_copy_user 4 35691 NULL
107537 +md_super_write_35703 md_super_write 4 35703 NULL
107538 +btrfs_commit_transaction_35725 btrfs_commit_transaction 0 35725 NULL
107539 +rtw_IOL_cmd_tx_pkt_buf_dump_35732 rtw_IOL_cmd_tx_pkt_buf_dump 2 35732 NULL
107540 +ext4_truncate_restart_trans_35750 ext4_truncate_restart_trans 0 35750 NULL
107541 +iwl_dbgfs_disable_ht40_read_35761 iwl_dbgfs_disable_ht40_read 3 35761 NULL
107542 +udf_alloc_i_data_35786 udf_alloc_i_data 2 35786 NULL
107543 +pvr2_hdw_cpufw_get_35824 pvr2_hdw_cpufw_get 0-4-2 35824 NULL
107544 +tx_tx_cmplt_read_35854 tx_tx_cmplt_read 3 35854 NULL
107545 +vx_query_hbuffer_size_35859 vx_query_hbuffer_size 0 35859 NULL
107546 +changed_xattr_35860 changed_xattr 0 35860 NULL
107547 +mthca_buf_alloc_35861 mthca_buf_alloc 2 35861 NULL
107548 +fls64_35862 fls64 0 35862 NULL
107549 +kvm_dirty_bitmap_bytes_35886 kvm_dirty_bitmap_bytes 0 35886 NULL
107550 +ieee80211_if_fmt_dot11MeshRetryTimeout_35890 ieee80211_if_fmt_dot11MeshRetryTimeout 3 35890 NULL
107551 +uwb_rc_cmd_done_35892 uwb_rc_cmd_done 4 35892 NULL
107552 +SyS_set_mempolicy_35909 SyS_set_mempolicy 3 35909 NULL
107553 +kernel_setsockopt_35913 kernel_setsockopt 5 35913 NULL
107554 +rbio_nr_pages_35916 rbio_nr_pages 0-1-2 35916 NULL
107555 +balance_node_right_35920 balance_node_right 0 35920 NULL
107556 +sctp_tsnmap_mark_35929 sctp_tsnmap_mark 2 35929 NULL
107557 +rx_defrag_init_called_read_35935 rx_defrag_init_called_read 3 35935 NULL
107558 +put_cmsg_compat_35937 put_cmsg_compat 4 35937 NULL
107559 +ext_rts51x_sd_execute_write_data_35971 ext_rts51x_sd_execute_write_data 9 35971 NULL
107560 +ceph_buffer_new_35974 ceph_buffer_new 1 35974 NULL nohasharray
107561 +generic_ocp_read_35974 generic_ocp_read 3 35974 &ceph_buffer_new_35974
107562 +acl_alloc_35979 acl_alloc 1 35979 NULL
107563 +device_add_class_symlinks_35985 device_add_class_symlinks 0 35985 NULL
107564 +generic_file_aio_read_35987 generic_file_aio_read 0 35987 NULL
107565 +write_file_antenna_35998 write_file_antenna 3 35998 NULL nohasharray
107566 +kuc_alloc_35998 kuc_alloc 1 35998 &write_file_antenna_35998
107567 +locks_mandatory_area_35999 locks_mandatory_area 0 35999 NULL
107568 +il3945_ucode_tx_stats_read_36016 il3945_ucode_tx_stats_read 3 36016 NULL
107569 +ubi_eba_write_leb_36029 ubi_eba_write_leb 0-6-5 36029 NULL
107570 +__videobuf_alloc_36031 __videobuf_alloc 1 36031 NULL
107571 +account_shadowed_36048 account_shadowed 2 36048 NULL
107572 +gpio_power_read_36059 gpio_power_read 3 36059 NULL
107573 +snd_pcm_playback_hw_avail_36061 snd_pcm_playback_hw_avail 0 36061 NULL
107574 +did_create_dir_36062 did_create_dir 0 36062 NULL
107575 +write_emulate_36065 write_emulate 2-4 36065 NULL
107576 +stack_max_size_write_36068 stack_max_size_write 3 36068 NULL
107577 +radeon_vm_num_pdes_36070 radeon_vm_num_pdes 0 36070 NULL
107578 +ieee80211_if_fmt_peer_36071 ieee80211_if_fmt_peer 3 36071 NULL
107579 +ext3_new_blocks_36073 ext3_new_blocks 3-0 36073 NULL
107580 +ieee80211_if_write_tsf_36077 ieee80211_if_write_tsf 3 36077 NULL
107581 +snd_pcm_plug_read_transfer_36080 snd_pcm_plug_read_transfer 0-3 36080 NULL
107582 +mtip_hw_read_device_status_36082 mtip_hw_read_device_status 3 36082 NULL
107583 +vga_arb_write_36112 vga_arb_write 3 36112 NULL
107584 +simple_xattr_alloc_36118 simple_xattr_alloc 2 36118 NULL
107585 +compat_ptrace_request_36131 compat_ptrace_request 3 36131 NULL
107586 +vmalloc_exec_36132 vmalloc_exec 1 36132 NULL
107587 +max8925_irq_domain_map_36133 max8925_irq_domain_map 2 36133 NULL
107588 +set_flexbg_block_bitmap_36136 set_flexbg_block_bitmap 0-4-5 36136 NULL
107589 +ext3_readpages_36144 ext3_readpages 4 36144 NULL
107590 +twl_set_36154 twl_set 2 36154 NULL
107591 +b1_alloc_card_36155 b1_alloc_card 1 36155 NULL
107592 +btrfs_file_extent_inline_len_36158 btrfs_file_extent_inline_len 0 36158 NULL
107593 +snd_korg1212_copy_from_36169 snd_korg1212_copy_from 6 36169 NULL
107594 +wm8994_edge_irq_map_36170 wm8994_edge_irq_map 2 36170 NULL
107595 +SyS_kexec_load_36176 SyS_kexec_load 2 36176 NULL
107596 +ramoops_init_przs_36199 ramoops_init_przs 4 36199 NULL
107597 +SYSC_sched_getaffinity_36208 SYSC_sched_getaffinity 2 36208 NULL
107598 +SYSC_process_vm_readv_36216 SYSC_process_vm_readv 5-3 36216 NULL
107599 +ubifs_read_nnode_36221 ubifs_read_nnode 0 36221 NULL
107600 +is_dirty_36223 is_dirty 2 36223 NULL
107601 +ept_walk_addr_36226 ept_walk_addr 3 36226 NULL
107602 +atomic_stats_read_36228 atomic_stats_read 3 36228 NULL
107603 +viafb_iga1_odev_proc_write_36241 viafb_iga1_odev_proc_write 3 36241 NULL
107604 +SYSC_getxattr_36242 SYSC_getxattr 4 36242 NULL
107605 +rproc_recovery_read_36245 rproc_recovery_read 3 36245 NULL
107606 +scrub_stripe_36248 scrub_stripe 5-4 36248 NULL
107607 +compat_sys_mbind_36256 compat_sys_mbind 5 36256 NULL
107608 +usb_buffer_alloc_36276 usb_buffer_alloc 2 36276 NULL nohasharray
107609 +cfs_hash_buckets_realloc_36276 cfs_hash_buckets_realloc 4 36276 &usb_buffer_alloc_36276
107610 +codec_reg_read_file_36280 codec_reg_read_file 3 36280 NULL
107611 +crypto_shash_digestsize_36284 crypto_shash_digestsize 0 36284 NULL
107612 +nouveau_cli_create_36293 nouveau_cli_create 3 36293 NULL
107613 +lpfc_debugfs_dif_err_read_36303 lpfc_debugfs_dif_err_read 3 36303 NULL
107614 +cfg80211_rx_mlme_mgmt_36306 cfg80211_rx_mlme_mgmt 3 36306 NULL
107615 +ad7879_spi_xfer_36311 ad7879_spi_xfer 3 36311 NULL
107616 +fuse_get_user_addr_36312 fuse_get_user_addr 0 36312 NULL
107617 +fat_compat_ioctl_filldir_36328 fat_compat_ioctl_filldir 3 36328 NULL
107618 +lc_create_36332 lc_create 4 36332 NULL
107619 +jbd2_journal_init_revoke_table_36336 jbd2_journal_init_revoke_table 1 36336 NULL
107620 +isku_sysfs_read_key_mask_36343 isku_sysfs_read_key_mask 6 36343 NULL
107621 +ath6kl_regwrite_write_36351 ath6kl_regwrite_write 3 36351 NULL
107622 +v9fs_file_readn_36353 v9fs_file_readn 4 36353 NULL nohasharray
107623 +xz_dec_lzma2_create_36353 xz_dec_lzma2_create 2 36353 &v9fs_file_readn_36353
107624 +to_sector_36361 to_sector 0-1 36361 NULL
107625 +irq_domain_disassociate_36367 irq_domain_disassociate 2 36367 NULL
107626 +posix_acl_create_36383 posix_acl_create 0 36383 NULL
107627 +tunables_read_36385 tunables_read 3 36385 NULL
107628 +afs_alloc_flat_call_36399 afs_alloc_flat_call 3-2 36399 NULL
107629 +sierra_write_36402 sierra_write 4 36402 NULL
107630 +msb_update_block_36412 msb_update_block 2 36412 NULL
107631 +SyS_sethostname_36417 SyS_sethostname 2 36417 NULL
107632 +ReadW6692B_36445 ReadW6692B 0 36445 NULL
107633 +sctp_tsnmap_init_36446 sctp_tsnmap_init 2 36446 NULL
107634 +alloc_etherdev_mqs_36450 alloc_etherdev_mqs 1 36450 NULL
107635 +check_extent_in_eb_36467 check_extent_in_eb 0 36467 NULL
107636 +SyS_process_vm_writev_36476 SyS_process_vm_writev 5-3 36476 NULL
107637 +b43_nphy_load_samples_36481 b43_nphy_load_samples 3 36481 NULL
107638 +set_var_mtrr_range_36483 set_var_mtrr_range 2 36483 NULL
107639 +ip6_append_data_36490 ip6_append_data 4 36490 NULL nohasharray
107640 +tx_tx_checksum_result_read_36490 tx_tx_checksum_result_read 3 36490 &ip6_append_data_36490
107641 +cmd_loop_36491 cmd_loop 0 36491 NULL
107642 +__hwahc_op_set_ptk_36510 __hwahc_op_set_ptk 5 36510 NULL
107643 +mcam_v4l_read_36513 mcam_v4l_read 3 36513 NULL
107644 +get_param_l_36518 get_param_l 0 36518 NULL
107645 +ieee80211_if_read_fwded_frames_36520 ieee80211_if_read_fwded_frames 3 36520 NULL
107646 +lguest_setup_irq_36531 lguest_setup_irq 1 36531 NULL
107647 +crypto_aead_authsize_36537 crypto_aead_authsize 0 36537 NULL
107648 +cpu_type_read_36540 cpu_type_read 3 36540 NULL
107649 +__kfifo_to_user_36555 __kfifo_to_user 3-0 36555 NULL nohasharray
107650 +macvtap_do_read_36555 macvtap_do_read 4 36555 &__kfifo_to_user_36555
107651 +btrfs_get_token_64_36572 btrfs_get_token_64 0 36572 NULL
107652 +__erst_read_36579 __erst_read 0 36579 NULL
107653 +put_cmsg_36589 put_cmsg 4 36589 NULL
107654 +do_sendfile_36610 do_sendfile 4-5 36610 NULL
107655 +fat_ioctl_filldir_36621 fat_ioctl_filldir 3 36621 NULL
107656 +i915_gem_execbuffer_relocate_slow_36634 i915_gem_execbuffer_relocate_slow 0 36634 NULL
107657 +vxge_config_vpaths_36636 vxge_config_vpaths 0 36636 NULL
107658 +convert_extent_item_v0_36645 convert_extent_item_v0 4-0 36645 NULL
107659 +ced_ioctl_36647 ced_ioctl 2 36647 NULL
107660 +lpfc_idiag_extacc_alloc_get_36648 lpfc_idiag_extacc_alloc_get 0-3 36648 NULL
107661 +osd_req_list_collection_objects_36664 osd_req_list_collection_objects 5 36664 NULL
107662 +iscsi_host_alloc_36671 iscsi_host_alloc 2 36671 NULL
107663 +xillybus_read_36678 xillybus_read 3 36678 NULL
107664 +tlv_put_btrfs_timespec_36685 tlv_put_btrfs_timespec 0 36685 NULL nohasharray
107665 +ext4_mb_discard_group_preallocations_36685 ext4_mb_discard_group_preallocations 2 36685 &tlv_put_btrfs_timespec_36685
107666 +gsmtty_write_36702 gsmtty_write 3 36702 NULL
107667 +sched_clock_36717 sched_clock 0 36717 NULL
107668 +ocfs2_rotate_tree_right_36723 ocfs2_rotate_tree_right 0 36723 NULL
107669 +snd_rawmidi_kernel_read1_36740 snd_rawmidi_kernel_read1 4-0 36740 NULL
107670 +cxgbi_device_register_36746 cxgbi_device_register 2-1 36746 NULL
107671 +fc_exch_mgr_alloc_36751 fc_exch_mgr_alloc 3-4 36751 NULL
107672 +ps_poll_upsd_timeouts_read_36755 ps_poll_upsd_timeouts_read 3 36755 NULL
107673 +ext4_ext_convert_to_initialized_36765 ext4_ext_convert_to_initialized 0 36765 NULL
107674 +ptp_filter_init_36780 ptp_filter_init 2 36780 NULL
107675 +i40e_init_lan_hmc_36796 i40e_init_lan_hmc 2-3-4-5 36796 NULL
107676 +proc_fault_inject_read_36802 proc_fault_inject_read 3 36802 NULL
107677 +hiddev_ioctl_36816 hiddev_ioctl 2 36816 NULL
107678 +ocfs2_journal_access_rb_36823 ocfs2_journal_access_rb 0 36823 NULL
107679 +int_hardware_entry_36833 int_hardware_entry 3 36833 NULL
107680 +fc_change_queue_depth_36841 fc_change_queue_depth 2 36841 NULL
107681 +keyctl_describe_key_36853 keyctl_describe_key 3 36853 NULL
107682 +cm_write_36858 cm_write 3 36858 NULL
107683 +range_to_mtrr_with_hole_36861 range_to_mtrr_with_hole 2 36861 NULL
107684 +tx_tx_data_programmed_read_36871 tx_tx_data_programmed_read 3 36871 NULL
107685 +svc_setsockopt_36876 svc_setsockopt 5 36876 NULL
107686 +raid56_parity_write_36877 raid56_parity_write 5 36877 NULL
107687 +__btrfs_map_block_36883 __btrfs_map_block 3 36883 NULL
107688 +ib_ucm_alloc_data_36885 ib_ucm_alloc_data 3 36885 NULL
107689 +selinux_inode_notifysecctx_36896 selinux_inode_notifysecctx 3 36896 NULL
107690 +OS_kmalloc_36909 OS_kmalloc 1 36909 NULL
107691 +audio_set_endpoint_req_36918 audio_set_endpoint_req 0 36918 NULL
107692 +build_key_36931 build_key 1 36931 NULL
107693 +crypto_blkcipher_ivsize_36944 crypto_blkcipher_ivsize 0 36944 NULL
107694 +div_u64_36951 div_u64 0 36951 NULL
107695 +write_leb_36957 write_leb 0-5 36957 NULL
107696 +ntfs_external_attr_find_36963 ntfs_external_attr_find 0 36963 NULL
107697 +sparse_early_mem_maps_alloc_node_36971 sparse_early_mem_maps_alloc_node 4 36971 NULL
107698 +il4965_rs_sta_dbgfs_scale_table_write_36979 il4965_rs_sta_dbgfs_scale_table_write 3 36979 NULL
107699 +drbd_new_dev_size_36998 drbd_new_dev_size 0-3 36998 NULL
107700 +auok190xfb_write_37001 auok190xfb_write 3 37001 NULL
107701 +setxattr_37006 setxattr 4 37006 NULL
107702 +btrfs_update_root_37035 btrfs_update_root 0 37035 NULL
107703 +ocfs2_dlm_unlock_37037 ocfs2_dlm_unlock 0 37037 NULL
107704 +command_file_read_37038 command_file_read 3 37038 NULL
107705 +__hfsplus_brec_find_37048 __hfsplus_brec_find 0 37048 NULL
107706 +figure_loop_size_37051 figure_loop_size 2-3 37051 NULL
107707 +qp_broker_create_37053 qp_broker_create 6-5 37053 NULL nohasharray
107708 +ieee80211_if_read_drop_unencrypted_37053 ieee80211_if_read_drop_unencrypted 3 37053 &qp_broker_create_37053
107709 +SYSC_setxattr_37078 SYSC_setxattr 4 37078 NULL
107710 +parse_command_37079 parse_command 2 37079 NULL
107711 +pipeline_cs_rx_packet_in_read_37089 pipeline_cs_rx_packet_in_read 3 37089 NULL
107712 +tun_get_user_37094 tun_get_user 5 37094 NULL
107713 +has_wrprotected_page_37123 has_wrprotected_page 3-2 37123 NULL
107714 +snd_hda_get_conn_list_37132 snd_hda_get_conn_list 0 37132 NULL
107715 +mtt_free_res_37144 mtt_free_res 5 37144 NULL
107716 +msg_word_37164 msg_word 0 37164 NULL
107717 +BeceemNVMRead_37166 BeceemNVMRead 0 37166 NULL
107718 +f2fs_direct_IO_37167 f2fs_direct_IO 4 37167 NULL
107719 +can_set_xattr_37182 can_set_xattr 4 37182 NULL
107720 +btrfs_insert_delayed_items_37191 btrfs_insert_delayed_items 0 37191 NULL
107721 +vcc_recvmsg_37198 vcc_recvmsg 4 37198 NULL
107722 +sysfs_add_file_37200 sysfs_add_file 0 37200 NULL
107723 +mpol_misplaced_37205 mpol_misplaced 0 37205 NULL
107724 +forced_ps_write_37209 forced_ps_write 3 37209 NULL
107725 +crypto_shash_descsize_37212 crypto_shash_descsize 0 37212 NULL nohasharray
107726 +ext4_ind_direct_IO_37212 ext4_ind_direct_IO 0-4 37212 &crypto_shash_descsize_37212
107727 +bchannel_get_rxbuf_37213 bchannel_get_rxbuf 2-0 37213 NULL
107728 +regmap_access_read_file_37223 regmap_access_read_file 3 37223 NULL
107729 +__do_replace_37227 __do_replace 5 37227 NULL
107730 +produce_free_peb_37232 produce_free_peb 0 37232 NULL
107731 +iwl_dbgfs_d3_sram_read_37237 iwl_dbgfs_d3_sram_read 3 37237 NULL
107732 +rx_filter_dup_filter_read_37238 rx_filter_dup_filter_read 3 37238 NULL
107733 +BeceemFlashBulkWrite_37255 BeceemFlashBulkWrite 0 37255 NULL
107734 +prot_queue_del_37258 prot_queue_del 0 37258 NULL
107735 +exofs_max_io_pages_37263 exofs_max_io_pages 0-2 37263 NULL
107736 +__add_keyed_refs_37264 __add_keyed_refs 0 37264 NULL
107737 +nested_svm_map_37268 nested_svm_map 2 37268 NULL
107738 +create_cq_user_37278 create_cq_user 5 37278 NULL
107739 +request_threaded_irq_37303 request_threaded_irq 0 37303 NULL
107740 +ieee80211_if_read_power_mode_37305 ieee80211_if_read_power_mode 3 37305 NULL
107741 +ext3_direct_IO_37308 ext3_direct_IO 4 37308 NULL
107742 +jffs2_write_dirent_37311 jffs2_write_dirent 5 37311 NULL
107743 +send_msg_37323 send_msg 4 37323 NULL
107744 +l2cap_create_connless_pdu_37327 l2cap_create_connless_pdu 3 37327 NULL nohasharray
107745 +bnx2x_vf_fill_fw_str_37327 bnx2x_vf_fill_fw_str 3 37327 &l2cap_create_connless_pdu_37327
107746 +scsi_mode_select_37330 scsi_mode_select 6 37330 NULL
107747 +rxrpc_server_sendmsg_37331 rxrpc_server_sendmsg 4 37331 NULL
107748 +lz4_compressbound_37337 lz4_compressbound 0-1 37337 NULL
107749 +move_vma_37341 move_vma 0-5 37341 NULL
107750 +security_inode_getsecurity_37354 security_inode_getsecurity 0 37354 NULL
107751 +cl_io_submit_rw_37374 cl_io_submit_rw 0 37374 NULL
107752 +iterate_leaf_refs_37385 iterate_leaf_refs 0 37385 NULL
107753 +iommu_num_pages_37391 iommu_num_pages 0-1-3-2 37391 NULL
107754 +btrfs_find_free_objectid_37412 btrfs_find_free_objectid 0 37412 NULL
107755 +hci_sock_sendmsg_37420 hci_sock_sendmsg 4 37420 NULL
107756 +acpi_os_allocate_zeroed_37422 acpi_os_allocate_zeroed 1 37422 NULL nohasharray
107757 +find_next_bit_37422 find_next_bit 0-3-2 37422 &acpi_os_allocate_zeroed_37422
107758 +ocfs2_insert_path_37425 ocfs2_insert_path 0 37425 NULL
107759 +tty_insert_flip_string_fixed_flag_37428 tty_insert_flip_string_fixed_flag 4-0 37428 NULL
107760 +iwl_print_last_event_logs_37433 iwl_print_last_event_logs 0-7-9 37433 NULL
107761 +fru_alloc_37442 fru_alloc 1 37442 NULL
107762 +tcp_established_options_37450 tcp_established_options 0 37450 NULL
107763 +brcmf_sdio_dump_console_37455 brcmf_sdio_dump_console 4 37455 NULL
107764 +__remove_37457 __remove 2 37457 NULL
107765 +ufs_data_ptr_to_cpu_37475 ufs_data_ptr_to_cpu 0 37475 NULL
107766 +get_est_timing_37484 get_est_timing 0 37484 NULL
107767 +kmem_realloc_37489 kmem_realloc 2 37489 NULL
107768 +__hfsplus_setxattr_37499 __hfsplus_setxattr 4 37499 NULL
107769 +bitmap_dirty_bits_37503 bitmap_dirty_bits 2 37503 NULL
107770 +osc_active_seq_write_37514 osc_active_seq_write 3 37514 NULL
107771 +bdev_writeseg_37519 bdev_writeseg 2-3 37519 NULL nohasharray
107772 +vmalloc_32_user_37519 vmalloc_32_user 1 37519 &bdev_writeseg_37519
107773 +xz_dec_test_write_37527 xz_dec_test_write 3 37527 NULL
107774 +fault_inject_read_37534 fault_inject_read 3 37534 NULL
107775 +hdr_size_37536 hdr_size 0 37536 NULL
107776 +extent_map_end_37550 extent_map_end 0 37550 NULL
107777 +sep_create_dcb_dmatables_context_37551 sep_create_dcb_dmatables_context 6 37551 NULL
107778 +ioat_chansts_37558 ioat_chansts 0 37558 NULL
107779 +xhci_alloc_streams_37586 xhci_alloc_streams 5 37586 NULL
107780 +ocfs2_add_branch_37588 ocfs2_add_branch 0 37588 NULL
107781 +alloc_descs_37593 alloc_descs 0-1-3 37593 NULL
107782 +qla2x00_debounce_register_37597 qla2x00_debounce_register 0 37597 NULL
107783 +find_extent_clone_37603 find_extent_clone 0 37603 NULL
107784 +btrfs_write_and_wait_marked_extents_37604 btrfs_write_and_wait_marked_extents 0 37604 NULL
107785 +kvm_read_guest_page_mmu_37611 kvm_read_guest_page_mmu 6-3 37611 NULL
107786 +SYSC_mbind_37622 SYSC_mbind 5 37622 NULL nohasharray
107787 +ocfs2_split_refcount_rec_37622 ocfs2_split_refcount_rec 0 37622 &SYSC_mbind_37622
107788 +btrfs_calc_trans_metadata_size_37629 btrfs_calc_trans_metadata_size 0-2 37629 NULL nohasharray
107789 +policy_residency_37629 policy_residency 0 37629 &btrfs_calc_trans_metadata_size_37629
107790 +alloc_fd_37637 alloc_fd 1 37637 NULL
107791 +SyS_mbind_37638 SyS_mbind 5 37638 NULL
107792 +bio_copy_user_iov_37660 bio_copy_user_iov 4 37660 NULL
107793 +rfcomm_sock_sendmsg_37661 rfcomm_sock_sendmsg 4 37661 NULL nohasharray
107794 +vmw_framebuffer_dmabuf_dirty_37661 vmw_framebuffer_dmabuf_dirty 6 37661 &rfcomm_sock_sendmsg_37661
107795 +SYSC_get_mempolicy_37664 SYSC_get_mempolicy 4-3 37664 NULL
107796 +lnw_gpio_to_irq_37665 lnw_gpio_to_irq 2 37665 NULL
107797 +ieee80211_if_read_rc_rateidx_mcs_mask_2ghz_37675 ieee80211_if_read_rc_rateidx_mcs_mask_2ghz 3 37675 NULL
107798 +regmap_map_read_file_37685 regmap_map_read_file 3 37685 NULL
107799 +page_chain_free_37697 page_chain_free 0 37697 NULL
107800 +nametbl_header_37698 nametbl_header 2-0 37698 NULL
107801 +__le32_to_cpup_37702 __le32_to_cpup 0 37702 NULL
107802 +dynamic_ps_timeout_write_37713 dynamic_ps_timeout_write 3 37713 NULL
107803 +dm_thin_remove_block_37724 dm_thin_remove_block 2 37724 NULL
107804 +find_active_uprobe_37733 find_active_uprobe 1 37733 NULL
107805 +read_enabled_file_bool_37744 read_enabled_file_bool 3 37744 NULL
107806 +ocfs2_duplicate_clusters_by_jbd_37749 ocfs2_duplicate_clusters_by_jbd 5-4-6 37749 NULL
107807 +ocfs2_control_cfu_37750 ocfs2_control_cfu 2 37750 NULL
107808 +ipath_cdev_init_37752 ipath_cdev_init 1 37752 NULL
107809 +dccp_setsockopt_cscov_37766 dccp_setsockopt_cscov 2 37766 NULL
107810 +il4965_rs_sta_dbgfs_rate_scale_data_read_37792 il4965_rs_sta_dbgfs_rate_scale_data_read 3 37792 NULL
107811 +smk_read_logging_37804 smk_read_logging 3 37804 NULL
107812 +deny_write_access_37813 deny_write_access 0 37813 NULL
107813 +ocrdma_alloc_frmr_page_list_37815 ocrdma_alloc_frmr_page_list 2 37815 NULL
107814 +rx_decrypt_key_not_found_read_37820 rx_decrypt_key_not_found_read 3 37820 NULL
107815 +bitmap_find_next_zero_area_37827 bitmap_find_next_zero_area 2-3-0-5-4 37827 NULL
107816 +android_get_p2p_addr_37832 android_get_p2p_addr 0 37832 NULL
107817 +jbd2_journal_get_undo_access_37837 jbd2_journal_get_undo_access 0 37837 NULL
107818 +o2hb_debug_read_37851 o2hb_debug_read 3 37851 NULL
107819 +SYSC_pwrite64_37862 SYSC_pwrite64 3 37862 NULL
107820 +xfs_dir2_block_to_sf_37868 xfs_dir2_block_to_sf 3 37868 NULL
107821 +set_registers_37883 set_registers 4 37883 NULL
107822 +btrfs_stack_file_extent_disk_bytenr_37888 btrfs_stack_file_extent_disk_bytenr 0 37888 NULL
107823 +max77686_irq_domain_map_37897 max77686_irq_domain_map 2 37897 NULL
107824 +tipc_link_send_sections_fast_37920 tipc_link_send_sections_fast 4 37920 NULL
107825 +_rtw_malloc_37928 _rtw_malloc 1 37928 NULL nohasharray
107826 +pkt_alloc_packet_data_37928 pkt_alloc_packet_data 1 37928 &_rtw_malloc_37928
107827 +read_rbu_packet_size_37939 read_rbu_packet_size 6 37939 NULL
107828 +write_file_bool_37957 write_file_bool 3 37957 NULL
107829 +fifo_alloc_37961 fifo_alloc 1 37961 NULL
107830 +ext3_free_blocks_sb_37967 ext3_free_blocks_sb 4-3 37967 NULL
107831 +rds_rdma_extra_size_37990 rds_rdma_extra_size 0 37990 NULL
107832 +persistent_ram_old_size_37997 persistent_ram_old_size 0 37997 NULL
107833 +vfs_readv_38011 vfs_readv 3 38011 NULL
107834 +aggr_recv_addba_req_evt_38037 aggr_recv_addba_req_evt 4 38037 NULL
107835 +SyS_pwrite64_38041 SyS_pwrite64 3 38041 NULL
107836 +klsi_105_prepare_write_buffer_38044 klsi_105_prepare_write_buffer 3 38044 NULL nohasharray
107837 +il_dbgfs_chain_noise_read_38044 il_dbgfs_chain_noise_read 3 38044 &klsi_105_prepare_write_buffer_38044
107838 +SyS_llistxattr_38048 SyS_llistxattr 3 38048 NULL
107839 +sysfs_do_create_link_38051 sysfs_do_create_link 0 38051 NULL
107840 +_xfs_buf_alloc_38058 _xfs_buf_alloc 3 38058 NULL nohasharray
107841 +is_discarded_38058 is_discarded 2 38058 &_xfs_buf_alloc_38058
107842 +nsm_create_handle_38060 nsm_create_handle 4 38060 NULL
107843 +alloc_ltalkdev_38071 alloc_ltalkdev 1 38071 NULL
107844 +xfs_buf_readahead_map_38081 xfs_buf_readahead_map 3 38081 NULL
107845 +uwb_mac_addr_print_38085 uwb_mac_addr_print 2 38085 NULL
107846 +request_key_auth_new_38092 request_key_auth_new 3 38092 NULL
107847 +proc_self_readlink_38094 proc_self_readlink 3 38094 NULL
107848 +ep0_read_38095 ep0_read 3 38095 NULL
107849 +sk_wmem_schedule_38096 sk_wmem_schedule 2 38096 NULL nohasharray
107850 +osc_checksum_seq_write_38096 osc_checksum_seq_write 3 38096 &sk_wmem_schedule_38096
107851 +o2hb_read_slots_38105 o2hb_read_slots 2 38105 NULL
107852 +snd_pcm_oss_write_38108 snd_pcm_oss_write 3 38108 NULL
107853 +_get_val_38115 _get_val 2 38115 NULL
107854 +vmw_kms_present_38130 vmw_kms_present 9 38130 NULL
107855 +__ntfs_copy_from_user_iovec_inatomic_38153 __ntfs_copy_from_user_iovec_inatomic 0-4-3 38153 NULL
107856 +btrfs_extent_same_38163 btrfs_extent_same 3-2 38163 NULL
107857 +kvm_clear_guest_38164 kvm_clear_guest 3-2 38164 NULL
107858 +cirrus_ttm_tt_create_38167 cirrus_ttm_tt_create 2 38167 NULL
107859 +send_rename_38170 send_rename 0 38170 NULL
107860 +cdev_add_38176 cdev_add 2-3 38176 NULL
107861 +create_srq_user_38191 create_srq_user 5 38191 NULL
107862 +rt2x00debug_write_rf_38195 rt2x00debug_write_rf 3 38195 NULL
107863 +do_chunk_alloc_38200 do_chunk_alloc 0 38200 NULL
107864 +get_ucode_user_38202 get_ucode_user 3 38202 NULL
107865 +ext3_new_block_38208 ext3_new_block 3-0 38208 NULL
107866 +stmpe_gpio_irq_map_38222 stmpe_gpio_irq_map 3 38222 NULL
107867 +osd_req_list_partition_collections_38223 osd_req_list_partition_collections 5 38223 NULL
107868 +palmas_gpio_to_irq_38235 palmas_gpio_to_irq 2 38235 NULL
107869 +ceph_decode_16_38239 ceph_decode_16 0 38239 NULL
107870 +_ipw_read_reg32_38245 _ipw_read_reg32 0 38245 NULL
107871 +snd_pcm_playback_rewind_38249 snd_pcm_playback_rewind 0-2 38249 NULL
107872 +from_dblock_38256 from_dblock 0-1 38256 NULL
107873 +vmci_qp_broker_set_page_store_38260 vmci_qp_broker_set_page_store 3-2 38260 NULL
107874 +SYSC_msgrcv_38268 SYSC_msgrcv 3 38268 NULL nohasharray
107875 +ieee80211_if_read_auto_open_plinks_38268 ieee80211_if_read_auto_open_plinks 3 38268 &SYSC_msgrcv_38268 nohasharray
107876 +mthca_alloc_icm_table_38268 mthca_alloc_icm_table 4-3 38268 &ieee80211_if_read_auto_open_plinks_38268
107877 +xfs_bmdr_to_bmbt_38275 xfs_bmdr_to_bmbt 5 38275 NULL nohasharray
107878 +xfs_bmbt_to_bmdr_38275 xfs_bmbt_to_bmdr 3 38275 &xfs_bmdr_to_bmbt_38275
107879 +ftdi_process_packet_38281 ftdi_process_packet 4 38281 NULL
107880 +gpa_to_gfn_38291 gpa_to_gfn 0-1 38291 NULL
107881 +ucma_query_path_38305 ucma_query_path 3 38305 NULL
107882 +isr_rx_headers_read_38325 isr_rx_headers_read 3 38325 NULL
107883 +ida_simple_get_38326 ida_simple_get 2-0 38326 NULL
107884 +ocfs2_rotate_rightmost_leaf_left_38330 ocfs2_rotate_rightmost_leaf_left 0 38330 NULL
107885 +__snd_gf1_look8_38333 __snd_gf1_look8 0 38333 NULL
107886 +ocfs2_replace_extent_rec_38357 ocfs2_replace_extent_rec 0 38357 NULL
107887 +btrfs_file_extent_disk_num_bytes_38363 btrfs_file_extent_disk_num_bytes 0 38363 NULL
107888 +dn_sendmsg_38390 dn_sendmsg 4 38390 NULL
107889 +get_valid_node_allowed_38412 get_valid_node_allowed 1-0 38412 NULL
107890 +ocfs2_which_cluster_group_38413 ocfs2_which_cluster_group 0-2 38413 NULL
107891 +ht_destroy_irq_38418 ht_destroy_irq 1 38418 NULL
107892 +ieee80211_if_read_dtim_count_38419 ieee80211_if_read_dtim_count 3 38419 NULL
107893 +pmcraid_copy_sglist_38431 pmcraid_copy_sglist 3 38431 NULL
107894 +kvm_write_guest_38454 kvm_write_guest 4-2 38454 NULL
107895 +btrfs_next_old_leaf_38465 btrfs_next_old_leaf 0 38465 NULL
107896 +kvm_arch_setup_async_pf_38481 kvm_arch_setup_async_pf 3 38481 NULL
107897 +blk_end_bidi_request_38482 blk_end_bidi_request 4-3 38482 NULL
107898 +cpu_to_mem_38501 cpu_to_mem 0 38501 NULL
107899 +dev_names_read_38509 dev_names_read 3 38509 NULL
107900 +iscsi_create_iface_38510 iscsi_create_iface 5 38510 NULL
107901 +event_rx_mismatch_read_38518 event_rx_mismatch_read 3 38518 NULL
107902 +mlx4_ib_db_map_user_38529 mlx4_ib_db_map_user 2 38529 NULL
107903 +ubifs_idx_node_sz_38546 ubifs_idx_node_sz 0-2 38546 NULL
107904 +btrfs_discard_extent_38547 btrfs_discard_extent 2 38547 NULL
107905 +kuc_len_38557 kuc_len 0-1 38557 NULL
107906 +cpu_to_node_38561 cpu_to_node 0 38561 NULL
107907 +irda_sendmsg_dgram_38563 irda_sendmsg_dgram 4 38563 NULL
107908 +il4965_rs_sta_dbgfs_scale_table_read_38564 il4965_rs_sta_dbgfs_scale_table_read 3 38564 NULL
107909 +_ipw_read32_38565 _ipw_read32 0 38565 NULL
107910 +snd_nm256_playback_copy_38567 snd_nm256_playback_copy 5-3 38567 NULL
107911 +copy_ctl_value_to_user_38587 copy_ctl_value_to_user 4 38587 NULL
107912 +compat_sys_ptrace_38595 compat_sys_ptrace 3 38595 NULL
107913 +blkg_stat_read_38612 blkg_stat_read 0 38612 NULL
107914 +icn_writecmd_38629 icn_writecmd 2 38629 NULL
107915 +write_enabled_file_bool_38630 write_enabled_file_bool 3 38630 NULL
107916 +ext2_readpages_38640 ext2_readpages 4 38640 NULL
107917 +cma_create_area_38642 cma_create_area 2 38642 NULL
107918 +audit_init_entry_38644 audit_init_entry 1 38644 NULL
107919 +qp_broker_alloc_38646 qp_broker_alloc 6-5 38646 NULL
107920 +mmc_send_cxd_data_38655 mmc_send_cxd_data 5 38655 NULL
107921 +nouveau_instmem_create__38664 nouveau_instmem_create_ 4 38664 NULL
107922 +snd_es1371_wait_src_ready_38673 snd_es1371_wait_src_ready 0 38673 NULL
107923 +iscsit_dump_data_payload_38683 iscsit_dump_data_payload 2 38683 NULL
107924 +ext4_wait_block_bitmap_38695 ext4_wait_block_bitmap 2 38695 NULL
107925 +validate_vid_hdr_38699 validate_vid_hdr 0 38699 NULL
107926 +rbio_add_io_page_38700 rbio_add_io_page 6 38700 NULL
107927 +find_next_usable_block_38716 find_next_usable_block 3-1-0 38716 NULL
107928 +alloc_trace_probe_38720 alloc_trace_probe 6 38720 NULL
107929 +w83977af_sir_interrupt_38738 w83977af_sir_interrupt 0 38738 NULL
107930 +udf_readpages_38761 udf_readpages 4 38761 NULL
107931 +iwl_dbgfs_thermal_throttling_read_38779 iwl_dbgfs_thermal_throttling_read 3 38779 NULL
107932 +bcache_device_init_38781 bcache_device_init 3 38781 NULL
107933 +snd_gus_dram_write_38784 snd_gus_dram_write 4 38784 NULL
107934 +slab_order_38794 slab_order 0 38794 NULL
107935 +do_pci_enable_device_38802 do_pci_enable_device 0 38802 NULL
107936 +err_decode_38804 err_decode 2 38804 NULL
107937 +btrfs_find_space_cluster_38812 btrfs_find_space_cluster 4-0 38812 NULL
107938 +ipv6_renew_option_38813 ipv6_renew_option 3 38813 NULL
107939 +direct_entry_38836 direct_entry 3 38836 NULL
107940 +compat_udp_setsockopt_38840 compat_udp_setsockopt 5 38840 NULL
107941 +read_nic_io_word_38853 read_nic_io_word 0 38853 NULL
107942 +interfaces_38859 interfaces 2 38859 NULL
107943 +pci_msix_table_size_38867 pci_msix_table_size 0 38867 NULL
107944 +dbgfs_state_38894 dbgfs_state 3 38894 NULL
107945 +f2fs_xattr_set_acl_38895 f2fs_xattr_set_acl 4 38895 NULL
107946 +traverse_38897 traverse 0 38897 NULL
107947 +__fswab16_38898 __fswab16 0 38898 NULL
107948 +ext3_trim_all_free_38929 ext3_trim_all_free 4-3-2 38929 NULL
107949 +il_dbgfs_sram_write_38942 il_dbgfs_sram_write 3 38942 NULL
107950 +__ath6kl_wmi_send_mgmt_cmd_38971 __ath6kl_wmi_send_mgmt_cmd 7 38971 NULL
107951 +C_SYSC_preadv64_38977 C_SYSC_preadv64 3 38977 NULL nohasharray
107952 +usb_maxpacket_38977 usb_maxpacket 0 38977 &C_SYSC_preadv64_38977
107953 +OSDSetBlock_38986 OSDSetBlock 2-4 38986 NULL
107954 +lpfc_idiag_extacc_write_38998 lpfc_idiag_extacc_write 3 38998 NULL
107955 +udf_new_block_38999 udf_new_block 4 38999 NULL
107956 +get_nodes_39012 get_nodes 3 39012 NULL
107957 +twl6030_interrupt_unmask_39013 twl6030_interrupt_unmask 2 39013 NULL
107958 +__blkdev_issue_zeroout_39020 __blkdev_issue_zeroout 3-0 39020 NULL
107959 +qgroup_account_ref_step1_39024 qgroup_account_ref_step1 0 39024 NULL
107960 +acpi_install_gpe_block_39031 acpi_install_gpe_block 4 39031 NULL
107961 +_zd_iowrite32v_async_locked_39034 _zd_iowrite32v_async_locked 3 39034 NULL
107962 +do_write_kmem_39051 do_write_kmem 1-3-0 39051 NULL
107963 +gen_pool_create_39064 gen_pool_create 2 39064 NULL
107964 +ext4_init_block_bitmap_39071 ext4_init_block_bitmap 3 39071 NULL
107965 +ReadHFC_39104 ReadHFC 0 39104 NULL
107966 +tomoyo_truncate_39105 tomoyo_truncate 0 39105 NULL
107967 +leb_write_lock_39111 leb_write_lock 0 39111 NULL
107968 +__kfifo_to_user_r_39123 __kfifo_to_user_r 5-3 39123 NULL
107969 +ttm_mem_global_alloc_zone_39125 ttm_mem_global_alloc_zone 0 39125 NULL
107970 +ea_foreach_39133 ea_foreach 0 39133 NULL
107971 +generic_permission_39150 generic_permission 0 39150 NULL
107972 +proc_coredump_filter_read_39153 proc_coredump_filter_read 3 39153 NULL
107973 +echo_client_kbrw_39170 echo_client_kbrw 6 39170 NULL
107974 +btrfs_start_delalloc_flush_39173 btrfs_start_delalloc_flush 0 39173 NULL
107975 +ext3_xattr_check_names_39174 ext3_xattr_check_names 0 39174 NULL
107976 +ubi_more_update_data_39189 ubi_more_update_data 4-0 39189 NULL
107977 +qcam_read_bytes_39205 qcam_read_bytes 0 39205 NULL
107978 +ivtv_v4l2_write_39226 ivtv_v4l2_write 3 39226 NULL
107979 +posix_acl_to_xattr_39237 posix_acl_to_xattr 0 39237 NULL
107980 +drm_order_39244 drm_order 0 39244 NULL
107981 +snd_pcm_capture_forward_39248 snd_pcm_capture_forward 0-2 39248 NULL
107982 +r128_compat_ioctl_39250 r128_compat_ioctl 2 39250 NULL nohasharray
107983 +pwr_cont_miss_bcns_spread_read_39250 pwr_cont_miss_bcns_spread_read 3 39250 &r128_compat_ioctl_39250
107984 +i915_error_state_read_39254 i915_error_state_read 3 39254 NULL
107985 +bitmap_set_bits_39272 bitmap_set_bits 3 39272 NULL
107986 +rx_filter_protection_filter_read_39282 rx_filter_protection_filter_read 3 39282 NULL
107987 +__vmalloc_node_39308 __vmalloc_node 1-5 39308 NULL
107988 +__cfg80211_connect_result_39326 __cfg80211_connect_result 4-6 39326 NULL
107989 +insert_reserved_file_extent_39327 insert_reserved_file_extent 3-0 39327 NULL
107990 +wimax_msg_alloc_39343 wimax_msg_alloc 4 39343 NULL
107991 +__copy_from_user_nocache_39351 __copy_from_user_nocache 3-0 39351 NULL
107992 +ide_complete_rq_39354 ide_complete_rq 3 39354 NULL
107993 +do_write_log_from_user_39362 do_write_log_from_user 3-0 39362 NULL
107994 +vortex_wtdma_getlinearpos_39371 vortex_wtdma_getlinearpos 0 39371 NULL
107995 +regmap_name_read_file_39379 regmap_name_read_file 3 39379 NULL
107996 +fnic_trace_debugfs_read_39380 fnic_trace_debugfs_read 3 39380 NULL
107997 +ps_poll_ps_poll_utilization_read_39383 ps_poll_ps_poll_utilization_read 3 39383 NULL
107998 +__send_to_port_39386 __send_to_port 3 39386 NULL
107999 +perf_trace_buf_submit_39413 perf_trace_buf_submit 5 39413 NULL
108000 +user_power_read_39414 user_power_read 3 39414 NULL
108001 +alloc_agpphysmem_i8xx_39427 alloc_agpphysmem_i8xx 1 39427 NULL
108002 +__vb2_get_done_vb_39448 __vb2_get_done_vb 0 39448 NULL
108003 +apei_resources_add_39470 apei_resources_add 0 39470 NULL
108004 +setkey_unaligned_39474 setkey_unaligned 3 39474 NULL
108005 +do_get_mempolicy_39485 do_get_mempolicy 3 39485 NULL
108006 +ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries_39499 ieee80211_if_fmt_dot11MeshHWMPmaxPREQretries 3 39499 NULL
108007 +cl_req_alloc_39523 cl_req_alloc 4 39523 NULL
108008 +int_proc_write_39542 int_proc_write 3 39542 NULL
108009 +pp_write_39554 pp_write 3 39554 NULL
108010 +ol_dqblk_block_39558 ol_dqblk_block 2-0-3 39558 NULL
108011 +datablob_format_39571 datablob_format 2 39571 NULL nohasharray
108012 +ieee80211_if_read_fwded_mcast_39571 ieee80211_if_read_fwded_mcast 3 39571 &datablob_format_39571
108013 +ext_depth_39607 ext_depth 0 39607 NULL
108014 +nfs_idmap_get_key_39616 nfs_idmap_get_key 2 39616 NULL
108015 +sdio_readb_39618 sdio_readb 0 39618 NULL
108016 +set_dev_class_39645 set_dev_class 4 39645 NULL nohasharray
108017 +dm_exception_table_init_39645 dm_exception_table_init 2 39645 &set_dev_class_39645
108018 +snd_rme32_capture_copy_39653 snd_rme32_capture_copy 5 39653 NULL
108019 +tcp_try_rmem_schedule_39657 tcp_try_rmem_schedule 3 39657 NULL
108020 +kvm_read_guest_cached_39666 kvm_read_guest_cached 4 39666 NULL
108021 +v4l_stk_read_39672 v4l_stk_read 3 39672 NULL
108022 +hsc_msg_len_get_39673 hsc_msg_len_get 0 39673 NULL
108023 +do_surface_dirty_sou_39678 do_surface_dirty_sou 7 39678 NULL
108024 +vmx_set_cr3_39688 vmx_set_cr3 2 39688 NULL
108025 +sd_completed_bytes_39705 sd_completed_bytes 0 39705 NULL
108026 +ftrace_pid_write_39710 ftrace_pid_write 3 39710 NULL
108027 +test_add_free_space_entry_39754 test_add_free_space_entry 2-3 39754 NULL
108028 +adt7316_spi_multi_read_39765 adt7316_spi_multi_read 3 39765 NULL
108029 +remap_to_origin_clear_discard_39767 remap_to_origin_clear_discard 3 39767 NULL
108030 +crypto_ablkcipher_blocksize_39811 crypto_ablkcipher_blocksize 0 39811 NULL
108031 +security_inode_listsecurity_39812 security_inode_listsecurity 0 39812 NULL
108032 +snd_pcm_oss_writev3_39818 snd_pcm_oss_writev3 3 39818 NULL
108033 +get_priv_size_39828 get_priv_size 0-1 39828 NULL
108034 +can_rmdir_39890 can_rmdir 0 39890 NULL
108035 +pkt_add_39897 pkt_add 3 39897 NULL
108036 +read_file_modal_eeprom_39909 read_file_modal_eeprom 3 39909 NULL
108037 +gen_pool_add_virt_39913 gen_pool_add_virt 4-5 39913 NULL
108038 +dw210x_op_rw_39915 dw210x_op_rw 6 39915 NULL
108039 +dma_to_mm_pfn_39916 dma_to_mm_pfn 0-1 39916 NULL
108040 +aes_encrypt_interrupt_read_39919 aes_encrypt_interrupt_read 3 39919 NULL
108041 +exofs_read_kern_39921 exofs_read_kern 6 39921 NULL nohasharray
108042 +oom_score_adj_read_39921 oom_score_adj_read 3 39921 &exofs_read_kern_39921
108043 +__spi_async_39932 __spi_async 0 39932 NULL
108044 +__get_order_39935 __get_order 0 39935 NULL
108045 +error_error_frame_read_39947 error_error_frame_read 3 39947 NULL
108046 +tty_prepare_flip_string_39955 tty_prepare_flip_string 3-0 39955 NULL
108047 +lstcon_group_list_39958 lstcon_group_list 2 39958 NULL
108048 +dma_push_rx_39973 dma_push_rx 2 39973 NULL
108049 +broadsheetfb_write_39976 broadsheetfb_write 3 39976 NULL
108050 +mthca_array_init_39987 mthca_array_init 2 39987 NULL
108051 +fw_device_op_read_39990 fw_device_op_read 3 39990 NULL
108052 +server_name2svname_39998 server_name2svname 4 39998 NULL
108053 +xen_hvm_config_40018 xen_hvm_config 2 40018 NULL
108054 +ivtvfb_write_40023 ivtvfb_write 3 40023 NULL
108055 +disc_pwup_write_40027 disc_pwup_write 3 40027 NULL
108056 +ea_foreach_i_40028 ea_foreach_i 0 40028 NULL
108057 +datablob_hmac_append_40038 datablob_hmac_append 3 40038 NULL
108058 +regmap_add_irq_chip_40042 regmap_add_irq_chip 4 40042 NULL
108059 +ocfs2_claim_clusters_40050 ocfs2_claim_clusters 0 40050 NULL
108060 +add_tty_40055 add_tty 1 40055 NULL nohasharray
108061 +l2cap_create_iframe_pdu_40055 l2cap_create_iframe_pdu 3 40055 &add_tty_40055
108062 +atomic_xchg_40070 atomic_xchg 0 40070 NULL
108063 +snd_pcm_sw_params_user_40095 snd_pcm_sw_params_user 0 40095 NULL
108064 +gen_pool_first_fit_40110 gen_pool_first_fit 2-3-4 40110 NULL
108065 +sctp_setsockopt_delayed_ack_40129 sctp_setsockopt_delayed_ack 3 40129 NULL
108066 +dwc2_max_desc_num_40132 dwc2_max_desc_num 0 40132 NULL
108067 +rx_rx_frame_checksum_read_40140 rx_rx_frame_checksum_read 3 40140 NULL
108068 +ath10k_write_simulate_fw_crash_40143 ath10k_write_simulate_fw_crash 3 40143 NULL
108069 +iwch_alloc_fastreg_pbl_40153 iwch_alloc_fastreg_pbl 2 40153 NULL
108070 +clone_backref_node_40154 clone_backref_node 0 40154 NULL
108071 +pt_write_40159 pt_write 3 40159 NULL
108072 +scsi_sg_count_40182 scsi_sg_count 0 40182 NULL
108073 +ipr_alloc_ucode_buffer_40199 ipr_alloc_ucode_buffer 1 40199 NULL nohasharray
108074 +devnode_find_40199 devnode_find 3-2 40199 &ipr_alloc_ucode_buffer_40199
108075 +allocate_probes_40204 allocate_probes 1 40204 NULL
108076 +acpi_system_write_alarm_40205 acpi_system_write_alarm 3 40205 NULL
108077 +au0828_v4l2_read_40220 au0828_v4l2_read 3 40220 NULL
108078 +compress_file_range_40225 compress_file_range 3-4 40225 NULL
108079 +osst_read_40237 osst_read 3 40237 NULL
108080 +lpage_info_slot_40243 lpage_info_slot 3-1 40243 NULL
108081 +ocfs2_zero_extend_get_range_40248 ocfs2_zero_extend_get_range 4-3 40248 NULL
108082 +rs_sta_dbgfs_scale_table_read_40262 rs_sta_dbgfs_scale_table_read 3 40262 NULL
108083 +ext2_fiemap_40271 ext2_fiemap 4 40271 NULL
108084 +usbnet_read_cmd_40275 usbnet_read_cmd 7 40275 NULL
108085 +rx_xfr_hint_trig_read_40283 rx_xfr_hint_trig_read 3 40283 NULL
108086 +vfio_unmap_unpin_40284 vfio_unmap_unpin 3 40284 NULL
108087 +_calc_trunk_info_40291 _calc_trunk_info 2 40291 NULL
108088 +SyS_bind_40303 SyS_bind 3 40303 NULL
108089 +ubi_io_write_data_40305 ubi_io_write_data 0-5-4 40305 NULL
108090 +batadv_tt_changes_fill_buff_40323 batadv_tt_changes_fill_buff 4 40323 NULL
108091 +ib_get_mad_data_offset_40336 ib_get_mad_data_offset 0 40336 NULL
108092 +mmio_read_40348 mmio_read 4 40348 NULL
108093 +ocfs2_release_clusters_40355 ocfs2_release_clusters 4-0 40355 NULL
108094 +event_rx_mem_empty_read_40363 event_rx_mem_empty_read 3 40363 NULL
108095 +ocfs2_check_range_for_refcount_40365 ocfs2_check_range_for_refcount 3-2 40365 NULL
108096 +get_chars_40373 get_chars 3 40373 NULL
108097 +submit_page_section_40408 submit_page_section 0 40408 NULL
108098 +ecryptfs_read_lower_40419 ecryptfs_read_lower 3 40419 NULL
108099 +slab_node_40421 slab_node 0 40421 NULL
108100 +fb_prepare_extra_logos_40429 fb_prepare_extra_logos 0-2 40429 NULL
108101 +tomoyo_update_policy_40458 tomoyo_update_policy 2 40458 NULL
108102 +zd_usb_scnprint_id_40459 zd_usb_scnprint_id 0-3 40459 NULL
108103 +SyS_writev_40467 SyS_writev 3 40467 NULL
108104 +__i915_add_request_40469 __i915_add_request 0 40469 NULL
108105 +SyS_select_40473 SyS_select 1 40473 NULL
108106 +afs_fs_store_data_40484 afs_fs_store_data 3-4-5-6 40484 NULL
108107 +batadv_hash_new_40491 batadv_hash_new 1 40491 NULL
108108 +devcgroup_inode_permission_40492 devcgroup_inode_permission 0 40492 NULL
108109 +drm_vma_node_start_40500 drm_vma_node_start 0 40500 NULL
108110 +sg_phys_40507 sg_phys 0 40507 NULL
108111 +__ethtool_get_sset_count_40511 __ethtool_get_sset_count 0 40511 NULL
108112 +TSS_checkhmac2_40520 TSS_checkhmac2 7-5 40520 NULL
108113 +ixgbe_dbg_reg_ops_read_40540 ixgbe_dbg_reg_ops_read 3 40540 NULL
108114 +i915_gem_execbuffer_relocate_object_slow_40546 i915_gem_execbuffer_relocate_object_slow 0 40546 NULL
108115 +ima_write_policy_40548 ima_write_policy 3 40548 NULL
108116 +btrfs_flush_all_pending_stuffs_40553 btrfs_flush_all_pending_stuffs 0 40553 NULL
108117 +esp_alloc_tmp_40558 esp_alloc_tmp 2-3 40558 NULL
108118 +ufs_inode_getfrag_40560 ufs_inode_getfrag 2-4 40560 NULL
108119 +bdev_sectors_40564 bdev_sectors 0 40564 NULL
108120 +lba_to_map_index_40580 lba_to_map_index 0-1 40580 NULL
108121 +b1_get_byte_40597 b1_get_byte 0 40597 NULL
108122 +get_priv_descr_and_size_40612 get_priv_descr_and_size 0 40612 NULL
108123 +qla8044_rd_reg_40652 qla8044_rd_reg 0 40652 NULL
108124 +pid_nr_ns_40654 pid_nr_ns 0 40654 NULL
108125 +twl4030_kpwrite_u8_40665 twl4030_kpwrite_u8 3 40665 NULL
108126 +__cfg80211_roamed_40668 __cfg80211_roamed 4-6 40668 NULL
108127 +pipeline_rx_complete_stat_fifo_int_read_40671 pipeline_rx_complete_stat_fifo_int_read 3 40671 NULL
108128 +fops_read_40672 fops_read 3 40672 NULL
108129 +ext4_mark_inode_dirty_40673 ext4_mark_inode_dirty 0 40673 NULL
108130 +idr_get_empty_slot_40674 idr_get_empty_slot 0 40674 NULL
108131 +alloc_rbio_40676 alloc_rbio 4 40676 NULL
108132 +videobuf_dma_init_user_locked_40678 videobuf_dma_init_user_locked 3-4 40678 NULL
108133 +pci_enable_resources_40680 pci_enable_resources 0 40680 NULL
108134 +nfc_hci_set_param_40697 nfc_hci_set_param 5 40697 NULL
108135 +__seq_open_private_40715 __seq_open_private 3 40715 NULL
108136 +btrfs_find_one_extref_40724 btrfs_find_one_extref 0 40724 NULL
108137 +fuse_readpages_40737 fuse_readpages 4 40737 NULL
108138 +xfs_iext_remove_direct_40744 xfs_iext_remove_direct 3 40744 NULL nohasharray
108139 +find_next_zero_bit_le_40744 find_next_zero_bit_le 2-3-0 40744 &xfs_iext_remove_direct_40744
108140 +i915_gem_execbuffer_relocate_entry_40750 i915_gem_execbuffer_relocate_entry 0 40750 NULL
108141 +security_inode_listxattr_40752 security_inode_listxattr 0 40752 NULL
108142 +card_send_command_40757 card_send_command 3 40757 NULL
108143 +ad1889_readl_40765 ad1889_readl 0 40765 NULL
108144 +pg_write_40766 pg_write 3 40766 NULL
108145 +show_list_40775 show_list 3-0 40775 NULL
108146 +calcu_metadata_size_40782 calcu_metadata_size 0 40782 NULL
108147 +kfifo_out_copy_r_40784 kfifo_out_copy_r 3-0 40784 NULL
108148 +bitmap_weight_40791 bitmap_weight 0-2 40791 NULL
108149 +pyra_sysfs_read_40795 pyra_sysfs_read 6 40795 NULL
108150 +add_action_40811 add_action 4 40811 NULL
108151 +nl80211_send_roamed_40825 nl80211_send_roamed 5-7 40825 NULL
108152 +SyS_mbind_40828 SyS_mbind 5 40828 NULL
108153 +__mlx4_qp_reserve_range_40847 __mlx4_qp_reserve_range 2-3 40847 NULL
108154 +nilfs_mdt_init_40849 nilfs_mdt_init 3 40849 NULL
108155 +ocfs2_zero_partial_clusters_40856 ocfs2_zero_partial_clusters 2-3 40856 NULL
108156 +v9fs_file_read_40858 v9fs_file_read 3 40858 NULL
108157 +read_file_queue_40895 read_file_queue 3 40895 NULL
108158 +btrfs_set_inode_index_count_40901 btrfs_set_inode_index_count 0 40901 NULL
108159 +waiters_read_40902 waiters_read 3 40902 NULL
108160 +isdn_add_channels_40905 isdn_add_channels 3 40905 NULL
108161 +gfs2_ea_find_40913 gfs2_ea_find 0 40913 NULL
108162 +vol_cdev_write_40915 vol_cdev_write 3 40915 NULL
108163 +snd_vx_create_40948 snd_vx_create 4 40948 NULL nohasharray
108164 +sg_alloc_table_40948 sg_alloc_table 0 40948 &snd_vx_create_40948
108165 +wm8994_free_irq_40951 wm8994_free_irq 2 40951 NULL
108166 +rds_sendmsg_40976 rds_sendmsg 4 40976 NULL
108167 +btrfs_find_orphan_item_40977 btrfs_find_orphan_item 0 40977 NULL
108168 +insert_old_idx_40987 insert_old_idx 0 40987 NULL
108169 +il_dbgfs_fh_reg_read_40993 il_dbgfs_fh_reg_read 3 40993 NULL
108170 +mac80211_format_buffer_41010 mac80211_format_buffer 2 41010 NULL
108171 +mtd_block_isbad_41015 mtd_block_isbad 0 41015 NULL
108172 +__proc_dobitmasks_41029 __proc_dobitmasks 5 41029 NULL
108173 +_req_append_segment_41031 _req_append_segment 2 41031 NULL
108174 +mISDN_sock_sendmsg_41035 mISDN_sock_sendmsg 4 41035 NULL
108175 +ocfs2_xattr_index_block_find_41040 ocfs2_xattr_index_block_find 0 41040 NULL
108176 +lprocfs_write_frac_helper_41050 lprocfs_write_frac_helper 2 41050 NULL
108177 +BcmFlash2xBulkWrite_41054 BcmFlash2xBulkWrite 0 41054 NULL
108178 +calculate_order_41061 calculate_order 0 41061 NULL
108179 +vfs_listxattr_41062 vfs_listxattr 0 41062 NULL nohasharray
108180 +beacon_filtering_write_41062 beacon_filtering_write 3 41062 &vfs_listxattr_41062
108181 +cfg80211_inform_bss_frame_41078 cfg80211_inform_bss_frame 4 41078 NULL
108182 +roccat_read_41093 roccat_read 3 41093 NULL nohasharray
108183 +nvme_map_user_pages_41093 nvme_map_user_pages 4-3 41093 &roccat_read_41093
108184 +dma_attach_41094 dma_attach 6-5 41094 NULL
108185 +provide_user_output_41105 provide_user_output 3 41105 NULL
108186 +f_audio_buffer_alloc_41110 f_audio_buffer_alloc 1 41110 NULL
108187 +ath10k_read_wmi_services_41112 ath10k_read_wmi_services 3 41112 NULL
108188 +ocfs2_extend_trans_41116 ocfs2_extend_trans 0 41116 NULL
108189 +btrfs_subvolume_reserve_metadata_41130 btrfs_subvolume_reserve_metadata 3 41130 NULL
108190 +v4l2_ctrl_new_int_menu_41151 v4l2_ctrl_new_int_menu 4 41151 NULL
108191 +tx_frag_mpdu_alloc_failed_read_41167 tx_frag_mpdu_alloc_failed_read 3 41167 NULL
108192 +dvb_ca_write_41171 dvb_ca_write 3 41171 NULL
108193 +ol_quota_chunk_block_41177 ol_quota_chunk_block 0-2 41177 NULL
108194 +dgap_driver_kzmalloc_41189 dgap_driver_kzmalloc 1 41189 NULL
108195 +compat_sys_process_vm_writev_41194 compat_sys_process_vm_writev 3-5 41194 NULL
108196 +dfs_file_write_41196 dfs_file_write 3 41196 NULL
108197 +ocfs2_read_quota_block_41207 ocfs2_read_quota_block 2 41207 NULL
108198 +nfs_page_array_len_41219 nfs_page_array_len 0-2-1 41219 NULL
108199 +cfg80211_process_disassoc_41231 cfg80211_process_disassoc 3 41231 NULL
108200 +hiddev_compat_ioctl_41255 hiddev_compat_ioctl 2 41255 NULL
108201 +create_dir_41256 create_dir 0 41256 NULL
108202 +erst_read_41260 erst_read 0 41260 NULL
108203 +setup_cluster_bitmap_41270 setup_cluster_bitmap 4-0 41270 NULL
108204 +alloc_context_41283 alloc_context 1 41283 NULL
108205 +objio_alloc_io_state_41316 objio_alloc_io_state 7-6 41316 NULL
108206 +o2hb_setup_one_bio_41341 o2hb_setup_one_bio 4 41341 NULL
108207 +twl_change_queue_depth_41342 twl_change_queue_depth 2 41342 NULL
108208 +rtw_android_set_block_41347 rtw_android_set_block 0 41347 NULL
108209 +irq_expand_nr_irqs_41351 irq_expand_nr_irqs 0 41351 NULL
108210 +cnic_init_id_tbl_41354 cnic_init_id_tbl 2 41354 NULL
108211 +jbd2_alloc_41359 jbd2_alloc 1 41359 NULL
108212 +kmp_init_41373 kmp_init 2 41373 NULL
108213 +netlink_alloc_large_skb_41375 netlink_alloc_large_skb 1 41375 NULL
108214 +isr_commands_read_41398 isr_commands_read 3 41398 NULL
108215 +is_writethrough_io_41406 is_writethrough_io 3 41406 NULL
108216 +rx_defrag_decrypt_failed_read_41411 rx_defrag_decrypt_failed_read 3 41411 NULL
108217 +xfs_iext_add_41422 xfs_iext_add 3 41422 NULL
108218 +isdn_ppp_fill_rq_41428 isdn_ppp_fill_rq 2 41428 NULL
108219 +lbs_rdrf_read_41431 lbs_rdrf_read 3 41431 NULL
108220 +iio_device_alloc_41440 iio_device_alloc 1 41440 NULL
108221 +ntfs_file_buffered_write_41442 ntfs_file_buffered_write 6-4 41442 NULL
108222 +pcpu_build_alloc_info_41443 pcpu_build_alloc_info 1-2-3 41443 NULL
108223 +layout_leb_in_gaps_41470 layout_leb_in_gaps 0 41470 NULL
108224 +snd_pcm_status_41472 snd_pcm_status 0 41472 NULL
108225 +rt2x00debug_write_rfcsr_41473 rt2x00debug_write_rfcsr 3 41473 NULL
108226 +bl_alloc_init_bio_41478 bl_alloc_init_bio 1 41478 NULL
108227 +split_item_41483 split_item 0 41483 NULL
108228 +kvm_unmap_hva_range_41484 kvm_unmap_hva_range 3-2 41484 NULL
108229 +ipath_user_sdma_page_length_41490 ipath_user_sdma_page_length 0-2-1 41490 NULL
108230 +wep_interrupt_read_41492 wep_interrupt_read 3 41492 NULL
108231 +SyS_get_mempolicy_41495 SyS_get_mempolicy 3-4 41495 NULL
108232 +hpfs_translate_name_41497 hpfs_translate_name 3 41497 NULL
108233 +xfrm_hash_new_size_41505 xfrm_hash_new_size 0-1 41505 NULL
108234 +perf_sw_event_41517 perf_sw_event 2 41517 NULL
108235 +SyS_preadv_41523 SyS_preadv 3 41523 NULL
108236 +dm_get_reserved_rq_based_ios_41529 dm_get_reserved_rq_based_ios 0 41529 NULL
108237 +add_excluded_extent_41547 add_excluded_extent 0 41547 NULL
108238 +tx_tx_frame_checksum_read_41553 tx_tx_frame_checksum_read 3 41553 NULL
108239 +ath6kl_endpoint_stats_read_41554 ath6kl_endpoint_stats_read 3 41554 NULL
108240 +si476x_radio_fops_read_41559 si476x_radio_fops_read 3 41559 NULL nohasharray
108241 +nr_status_frames_41559 nr_status_frames 0-1 41559 &si476x_radio_fops_read_41559
108242 +rng_dev_read_41581 rng_dev_read 3 41581 NULL
108243 +vga_io_r_41609 vga_io_r 0 41609 NULL
108244 +tcp_hdrlen_41610 tcp_hdrlen 0 41610 NULL
108245 +lbs_bcnmiss_write_41613 lbs_bcnmiss_write 3 41613 NULL nohasharray
108246 +usb_endpoint_maxp_41613 usb_endpoint_maxp 0 41613 &lbs_bcnmiss_write_41613
108247 +a2mp_send_41615 a2mp_send 4 41615 NULL
108248 +btrfs_calc_trunc_metadata_size_41626 btrfs_calc_trunc_metadata_size 0-2 41626 NULL
108249 +lstcon_batch_list_41627 lstcon_batch_list 2 41627 NULL
108250 +mempool_create_kmalloc_pool_41650 mempool_create_kmalloc_pool 1 41650 NULL
108251 +rx_rx_pre_complt_read_41653 rx_rx_pre_complt_read 3 41653 NULL
108252 +get_std_timing_41654 get_std_timing 0 41654 NULL
108253 +start_graph_tracing_41656 start_graph_tracing 0 41656 NULL
108254 +ieee80211_if_fmt_bssid_41677 ieee80211_if_fmt_bssid 3 41677 NULL
108255 +params_period_bytes_41683 params_period_bytes 0 41683 NULL
108256 +fill_pcm_stream_name_41685 fill_pcm_stream_name 2 41685 NULL
108257 +lov_unpackmd_41701 lov_unpackmd 4 41701 NULL
108258 +__ext3_journal_get_write_access_41705 __ext3_journal_get_write_access 0 41705 NULL
108259 +apei_exec_for_each_entry_41717 apei_exec_for_each_entry 0 41717 NULL
108260 +bdx_tx_db_init_41719 bdx_tx_db_init 2 41719 NULL
108261 +ocfs2_block_group_fill_41744 ocfs2_block_group_fill 0 41744 NULL
108262 +fillonedir_41746 fillonedir 3 41746 NULL
108263 +ocfs2_dx_dir_rebalance_41793 ocfs2_dx_dir_rebalance 7 41793 NULL
108264 +iwl_dbgfs_bt_notif_read_41794 iwl_dbgfs_bt_notif_read 3 41794 NULL
108265 +remap_pte_range_41800 remap_pte_range 5 41800 NULL
108266 +hsi_alloc_controller_41802 hsi_alloc_controller 1 41802 NULL
108267 +regcache_sync_block_raw_41803 regcache_sync_block_raw 4-3 41803 NULL
108268 +rtw_android_get_macaddr_41812 rtw_android_get_macaddr 0 41812 NULL
108269 +da9052_enable_irq_41814 da9052_enable_irq 2 41814 NULL
108270 +sco_send_frame_41815 sco_send_frame 3 41815 NULL
108271 +lp_gpio_to_irq_41822 lp_gpio_to_irq 2 41822 NULL
108272 +ixgbe_dbg_netdev_ops_read_41839 ixgbe_dbg_netdev_ops_read 3 41839 NULL
108273 +do_ip_setsockopt_41852 do_ip_setsockopt 5 41852 NULL
108274 +keyctl_instantiate_key_41855 keyctl_instantiate_key 3 41855 NULL
108275 +pci_map_single_41869 pci_map_single 0 41869 NULL
108276 +usb_gadget_get_string_41871 usb_gadget_get_string 0 41871 NULL
108277 +v_APCI3120_InterruptDmaMoveBlock16bit_41914 v_APCI3120_InterruptDmaMoveBlock16bit 4 41914 NULL
108278 +get_fdb_entries_41916 get_fdb_entries 3 41916 NULL
108279 +find_ge_pid_41918 find_ge_pid 1 41918 NULL
108280 +build_inv_iotlb_pages_41922 build_inv_iotlb_pages 4-5 41922 NULL
108281 +nfsd_getxattr_41934 nfsd_getxattr 0 41934 NULL
108282 +ext4_da_write_inline_data_begin_41935 ext4_da_write_inline_data_begin 3-4 41935 NULL
108283 +read_gssp_41947 read_gssp 3 41947 NULL
108284 +ocfs2_xattr_bucket_get_name_value_41949 ocfs2_xattr_bucket_get_name_value 0 41949 NULL
108285 +portnames_read_41958 portnames_read 3 41958 NULL
108286 +ubi_self_check_all_ff_41959 ubi_self_check_all_ff 0-4 41959 NULL
108287 +dst_mtu_41969 dst_mtu 0 41969 NULL
108288 +btrfs_check_trunc_cache_free_space_41973 btrfs_check_trunc_cache_free_space 0 41973 NULL
108289 +cx24116_writeregN_41975 cx24116_writeregN 4 41975 NULL
108290 +ubi_io_is_bad_41983 ubi_io_is_bad 0 41983 NULL
108291 +pool_allocate_42012 pool_allocate 3 42012 NULL
108292 +spidev_sync_read_42014 spidev_sync_read 0 42014 NULL
108293 +rs_sta_dbgfs_scale_table_write_42017 rs_sta_dbgfs_scale_table_write 3 42017 NULL
108294 +nouveau_ttm_tt_create_42026 nouveau_ttm_tt_create 2 42026 NULL
108295 +acpi_ut_create_buffer_object_42030 acpi_ut_create_buffer_object 1 42030 NULL
108296 +__btrfs_drop_extents_42032 __btrfs_drop_extents 5-0 42032 NULL
108297 +__hwahc_op_set_gtk_42038 __hwahc_op_set_gtk 4 42038 NULL
108298 +irda_sendmsg_ultra_42047 irda_sendmsg_ultra 4 42047 NULL
108299 +dma_generic_alloc_coherent_42048 dma_generic_alloc_coherent 2 42048 NULL nohasharray
108300 +jffs2_do_link_42048 jffs2_do_link 6 42048 &dma_generic_alloc_coherent_42048
108301 +ps_poll_upsd_max_ap_turn_read_42050 ps_poll_upsd_max_ap_turn_read 3 42050 NULL
108302 +InterfaceTransmitPacket_42058 InterfaceTransmitPacket 3 42058 NULL
108303 +alloc_bitset_42085 alloc_bitset 1 42085 NULL
108304 +scsi_execute_req_42088 scsi_execute_req 5 42088 NULL
108305 +sk_chk_filter_42095 sk_chk_filter 2 42095 NULL
108306 +send_truncate_42107 send_truncate 0 42107 NULL
108307 +submit_inquiry_42108 submit_inquiry 3 42108 NULL
108308 +sysfs_read_file_42113 sysfs_read_file 3 42113 NULL nohasharray
108309 +dw_dma_cyclic_prep_42113 dw_dma_cyclic_prep 3-4 42113 &sysfs_read_file_42113
108310 +__btrfs_unlink_inode_42114 __btrfs_unlink_inode 0 42114 NULL
108311 +ext4_do_update_inode_42127 ext4_do_update_inode 0 42127 NULL
108312 +blk_ioctl_zeroout_42160 blk_ioctl_zeroout 3 42160 NULL
108313 +mmc_align_data_size_42161 mmc_align_data_size 0-2 42161 NULL
108314 +kvm_lapic_get_cr8_42166 kvm_lapic_get_cr8 0 42166 NULL
108315 +read_file_base_eeprom_42168 read_file_base_eeprom 3 42168 NULL
108316 +oprofilefs_str_to_user_42182 oprofilefs_str_to_user 3 42182 NULL
108317 +write_file_beacon_42185 write_file_beacon 3 42185 NULL
108318 +get_znodes_to_commit_42201 get_znodes_to_commit 0 42201 NULL
108319 +ocfs2_resv_window_bits_42207 ocfs2_resv_window_bits 0 42207 NULL
108320 +pla_ocp_read_42235 pla_ocp_read 3 42235 NULL
108321 +rx_defrag_need_decrypt_read_42253 rx_defrag_need_decrypt_read 3 42253 NULL
108322 +vfio_remove_dma_overlap_42255 vfio_remove_dma_overlap 2 42255 NULL
108323 +__pcpu_size_to_slot_42271 __pcpu_size_to_slot 0 42271 NULL
108324 +snd_pcm_hw_param_value_max_42280 snd_pcm_hw_param_value_max 0 42280 NULL
108325 +__cpus_weight_42299 __cpus_weight 2-0 42299 NULL
108326 +sel_read_perm_42302 sel_read_perm 3 42302 NULL
108327 +sctp_setsockopt_del_key_42304 sctp_setsockopt_del_key 3 42304 NULL nohasharray
108328 +ulong_read_file_42304 ulong_read_file 3 42304 &sctp_setsockopt_del_key_42304
108329 +gfn_to_hva_42305 gfn_to_hva 2-0 42305 NULL
108330 +close_cur_inode_file_42308 close_cur_inode_file 0 42308 NULL nohasharray
108331 +xfs_vm_readpages_42308 xfs_vm_readpages 4 42308 &close_cur_inode_file_42308
108332 +free_cblock_42318 free_cblock 2 42318 NULL
108333 +hysdn_conf_read_42324 hysdn_conf_read 3 42324 NULL
108334 +tcp_sync_mss_42330 tcp_sync_mss 2-0 42330 NULL
108335 +snd_pcm_plug_alloc_42339 snd_pcm_plug_alloc 2 42339 NULL
108336 +ide_raw_taskfile_42355 ide_raw_taskfile 4 42355 NULL
108337 +drbd_md_last_sector_42378 drbd_md_last_sector 0 42378 NULL
108338 +il_dbgfs_disable_ht40_read_42386 il_dbgfs_disable_ht40_read 3 42386 NULL
108339 +hash_ipportnet4_expire_42391 hash_ipportnet4_expire 3 42391 NULL
108340 +msnd_fifo_read_42406 msnd_fifo_read 0-3 42406 NULL
108341 +krng_get_random_42420 krng_get_random 3 42420 NULL
108342 +gsm_data_alloc_42437 gsm_data_alloc 3 42437 NULL
108343 +key_conf_keyidx_read_42443 key_conf_keyidx_read 3 42443 NULL
108344 +alloc_request_42448 alloc_request 0 42448 NULL
108345 +snd_pcm_action_group_42452 snd_pcm_action_group 0 42452 NULL
108346 +tcm_loop_change_queue_depth_42454 tcm_loop_change_queue_depth 2 42454 NULL
108347 +kuc_free_42455 kuc_free 2 42455 NULL
108348 +tc3589x_gpio_irq_get_virq_42457 tc3589x_gpio_irq_get_virq 2 42457 NULL
108349 +ext3_valid_block_bitmap_42459 ext3_valid_block_bitmap 3 42459 NULL
108350 +__simple_xattr_set_42474 __simple_xattr_set 4 42474 NULL
108351 +follow_hugetlb_page_42486 follow_hugetlb_page 0-7 42486 NULL
108352 +omfs_readpages_42490 omfs_readpages 4 42490 NULL
108353 +bypass_write_42498 bypass_write 3 42498 NULL
108354 +SyS_mincore_42511 SyS_mincore 1-2 42511 NULL
108355 +jbd2_log_wait_commit_42519 jbd2_log_wait_commit 0 42519 NULL
108356 +kvm_write_wall_clock_42520 kvm_write_wall_clock 2 42520 NULL
108357 +dio_bio_complete_42524 dio_bio_complete 0 42524 NULL
108358 +smk_write_netlbladdr_42525 smk_write_netlbladdr 3 42525 NULL
108359 +self_check_ec_hdr_42528 self_check_ec_hdr 0 42528 NULL
108360 +__register_ftrace_function_42543 __register_ftrace_function 0 42543 NULL
108361 +dbAllocNear_42546 dbAllocNear 0 42546 NULL
108362 +inorder_next_42548 inorder_next 1-0 42548 NULL
108363 +ath6kl_wmi_proc_events_vif_42549 ath6kl_wmi_proc_events_vif 5 42549 NULL
108364 +udp_recvmsg_42558 udp_recvmsg 4 42558 NULL
108365 +iwl_print_event_log_42566 iwl_print_event_log 0-5-7 42566 NULL
108366 +ocfs2_reserve_suballoc_bits_42569 ocfs2_reserve_suballoc_bits 0 42569 NULL
108367 +xfrm_new_hash_mask_42579 xfrm_new_hash_mask 0-1 42579 NULL
108368 +oom_score_adj_write_42594 oom_score_adj_write 3 42594 NULL
108369 +map_state_42602 map_state 1 42602 NULL
108370 +resp_write_42628 resp_write 2 42628 NULL
108371 +ieee80211_if_fmt_dot11MeshHWMPactivePathTimeout_42635 ieee80211_if_fmt_dot11MeshHWMPactivePathTimeout 3 42635 NULL
108372 +scsi_activate_tcq_42640 scsi_activate_tcq 2 42640 NULL
108373 +br_mdb_rehash_42643 br_mdb_rehash 2 42643 NULL
108374 +parport_pc_compat_write_block_pio_42644 parport_pc_compat_write_block_pio 3 42644 NULL
108375 +ocfs2_search_chain_42655 ocfs2_search_chain 0 42655 NULL
108376 +request_key_and_link_42693 request_key_and_link 4 42693 NULL
108377 +acpi_dev_get_irqresource_42694 acpi_dev_get_irqresource 2 42694 NULL
108378 +vb2_read_42703 vb2_read 3 42703 NULL
108379 +__ocfs2_decrease_refcount_42717 __ocfs2_decrease_refcount 5-4-0 42717 NULL
108380 +read_status_42722 read_status 0 42722 NULL
108381 +dvb_demux_ioctl_42733 dvb_demux_ioctl 2 42733 NULL
108382 +set_aoe_iflist_42737 set_aoe_iflist 2 42737 NULL
108383 +ax25_setsockopt_42740 ax25_setsockopt 5 42740 NULL
108384 +xen_bind_pirq_gsi_to_irq_42750 xen_bind_pirq_gsi_to_irq 1 42750 NULL
108385 +dpm_sysfs_add_42756 dpm_sysfs_add 0 42756 NULL
108386 +x25_recvmsg_42777 x25_recvmsg 4 42777 NULL
108387 +snd_midi_event_decode_42780 snd_midi_event_decode 0 42780 NULL
108388 +cryptd_hash_setkey_42781 cryptd_hash_setkey 3 42781 NULL nohasharray
108389 +isku_sysfs_read_info_42781 isku_sysfs_read_info 6 42781 &cryptd_hash_setkey_42781
108390 +elfcorehdr_read_notes_42786 elfcorehdr_read_notes 2 42786 NULL
108391 +koneplus_sysfs_read_42792 koneplus_sysfs_read 6 42792 NULL
108392 +ntfs_attr_extend_allocation_42796 ntfs_attr_extend_allocation 0-2-3 42796 NULL
108393 +fw_device_op_compat_ioctl_42804 fw_device_op_compat_ioctl 2 42804 NULL
108394 +drm_ioctl_42813 drm_ioctl 2 42813 NULL
108395 +iwl_dbgfs_ucode_bt_stats_read_42820 iwl_dbgfs_ucode_bt_stats_read 3 42820 NULL
108396 +xfs_iomap_eof_prealloc_initial_size_42822 xfs_iomap_eof_prealloc_initial_size 0-3 42822 NULL
108397 +set_arg_42824 set_arg 3 42824 NULL
108398 +si476x_radio_read_rsq_blob_42827 si476x_radio_read_rsq_blob 3 42827 NULL
108399 +ocfs2_desc_bitmap_to_cluster_off_42831 ocfs2_desc_bitmap_to_cluster_off 2 42831 NULL
108400 +prandom_u32_42853 prandom_u32 0 42853 NULL
108401 +ntfs_mapping_pairs_build_42859 ntfs_mapping_pairs_build 0 42859 NULL
108402 +nouveau_vm_create_42869 nouveau_vm_create 3-2 42869 NULL
108403 +ocfs2_clusters_for_bytes_42872 ocfs2_clusters_for_bytes 0-2 42872 NULL
108404 +nvme_trans_unit_serial_page_42879 nvme_trans_unit_serial_page 4 42879 NULL
108405 +xpc_kmalloc_cacheline_aligned_42895 xpc_kmalloc_cacheline_aligned 1 42895 NULL
108406 +hd_end_request_42904 hd_end_request 2 42904 NULL
108407 +sta_last_rx_rate_read_42909 sta_last_rx_rate_read 3 42909 NULL
108408 +sctp_getsockopt_maxburst_42941 sctp_getsockopt_maxburst 2 42941 NULL
108409 +get_unmapped_area_42944 get_unmapped_area 0 42944 NULL
108410 +vx_reset_chk_42946 vx_reset_chk 0 42946 NULL
108411 +send_link_42948 send_link 0 42948 NULL
108412 +blkdev_direct_IO_42962 blkdev_direct_IO 4 42962 NULL
108413 +read_file_node_stat_42964 read_file_node_stat 3 42964 NULL
108414 +compat_udpv6_setsockopt_42981 compat_udpv6_setsockopt 5 42981 NULL
108415 +nfs_idmap_get_desc_42990 nfs_idmap_get_desc 2-4 42990 NULL nohasharray
108416 +rtw_os_xmit_resource_alloc_42990 rtw_os_xmit_resource_alloc 3 42990 &nfs_idmap_get_desc_42990
108417 +mlx4_qp_reserve_range_43000 mlx4_qp_reserve_range 2-3 43000 NULL
108418 +isr_rx_mem_overflow_read_43025 isr_rx_mem_overflow_read 3 43025 NULL
108419 +add_bytes_to_bitmap_43026 add_bytes_to_bitmap 0-3-4 43026 NULL
108420 +init_phys_status_page_43028 init_phys_status_page 0 43028 NULL
108421 +wep_default_key_count_read_43035 wep_default_key_count_read 3 43035 NULL
108422 +send_to_group_43051 send_to_group 0 43051 NULL
108423 +nouveau_gpuobj_create__43072 nouveau_gpuobj_create_ 9 43072 NULL
108424 +nfs_map_group_to_gid_43082 nfs_map_group_to_gid 3 43082 NULL
108425 +ieee80211_if_fmt_drop_unencrypted_43107 ieee80211_if_fmt_drop_unencrypted 3 43107 NULL
108426 +calculate_node_totalpages_43118 calculate_node_totalpages 2-3 43118 NULL
108427 +cow_file_range_inline_43132 cow_file_range_inline 4 43132 NULL
108428 +read_file_dfs_43145 read_file_dfs 3 43145 NULL
108429 +cfs_cpt_table_alloc_43159 cfs_cpt_table_alloc 1 43159 NULL
108430 +usb_string_sub_43164 usb_string_sub 0 43164 NULL
108431 +il_dbgfs_power_save_status_read_43165 il_dbgfs_power_save_status_read 3 43165 NULL
108432 +send_cmd_43168 send_cmd 0 43168 NULL
108433 +ath6kl_set_assoc_req_ies_43185 ath6kl_set_assoc_req_ies 3 43185 NULL
108434 +ext4_xattr_ibody_get_43200 ext4_xattr_ibody_get 0 43200 NULL
108435 +uio_write_43202 uio_write 3 43202 NULL
108436 +iso_callback_43208 iso_callback 3 43208 NULL
108437 +ath10k_p2p_calc_noa_ie_len_43209 ath10k_p2p_calc_noa_ie_len 0 43209 NULL
108438 +f2fs_acl_from_disk_43210 f2fs_acl_from_disk 2 43210 NULL
108439 +atomic_long_add_return_43217 atomic_long_add_return 1-0 43217 NULL
108440 +vmemmap_alloc_block_43245 vmemmap_alloc_block 1 43245 NULL
108441 +fixup_leb_43256 fixup_leb 3 43256 NULL
108442 +ide_end_rq_43269 ide_end_rq 4 43269 NULL
108443 +nilfs_direct_IO_43271 nilfs_direct_IO 4 43271 NULL
108444 +mlx5_ib_reg_user_mr_43275 mlx5_ib_reg_user_mr 2-3 43275 NULL
108445 +parport_pc_ecp_write_block_pio_43278 parport_pc_ecp_write_block_pio 3 43278 NULL nohasharray
108446 +evtchn_write_43278 evtchn_write 3 43278 &parport_pc_ecp_write_block_pio_43278
108447 +filemap_write_and_wait_range_43279 filemap_write_and_wait_range 0 43279 NULL
108448 +mpage_alloc_43299 mpage_alloc 3 43299 NULL
108449 +get_nr_irqs_gsi_43315 get_nr_irqs_gsi 0 43315 NULL
108450 +mmu_set_spte_43327 mmu_set_spte 7-6 43327 NULL
108451 +__ext4_get_inode_loc_43332 __ext4_get_inode_loc 0 43332 NULL
108452 +kvm_host_page_size_43348 kvm_host_page_size 2-0 43348 NULL
108453 +activation_descriptor_init_43358 activation_descriptor_init 1 43358 NULL
108454 +gart_free_coherent_43362 gart_free_coherent 4-2 43362 NULL
108455 +hash_net4_expire_43378 hash_net4_expire 3 43378 NULL
108456 +xenfb_write_43412 xenfb_write 3 43412 NULL
108457 +__alloc_bootmem_low_43423 __alloc_bootmem_low 1 43423 NULL
108458 +usb_alloc_urb_43436 usb_alloc_urb 1 43436 NULL
108459 +ath6kl_wmi_roam_tbl_event_rx_43440 ath6kl_wmi_roam_tbl_event_rx 3 43440 NULL
108460 +ocfs2_rotate_tree_left_43442 ocfs2_rotate_tree_left 0 43442 NULL
108461 +usemap_size_43443 usemap_size 0-2-1 43443 NULL nohasharray
108462 +usb_string_43443 usb_string 0 43443 &usemap_size_43443
108463 +get_vm_area_size_43444 get_vm_area_size 0 43444 NULL
108464 +nvme_trans_device_id_page_43466 nvme_trans_device_id_page 4 43466 NULL
108465 +calculate_discard_block_size_43480 calculate_discard_block_size 0 43480 NULL nohasharray
108466 +alloc_new_reservation_43480 alloc_new_reservation 4-0-2 43480 &calculate_discard_block_size_43480
108467 +tx_tx_data_prepared_read_43497 tx_tx_data_prepared_read 3 43497 NULL
108468 +ieee80211_if_fmt_dot11MeshHWMPnetDiameterTraversalTime_43505 ieee80211_if_fmt_dot11MeshHWMPnetDiameterTraversalTime 3 43505 NULL
108469 +do_readlink_43518 do_readlink 2 43518 NULL
108470 +dvb_ca_en50221_io_write_43533 dvb_ca_en50221_io_write 3 43533 NULL
108471 +read_events_43534 read_events 3 43534 NULL
108472 +cachefiles_daemon_write_43535 cachefiles_daemon_write 3 43535 NULL
108473 +tx_frag_failed_read_43540 tx_frag_failed_read 3 43540 NULL nohasharray
108474 +ufs_alloccg_block_43540 ufs_alloccg_block 3-0 43540 &tx_frag_failed_read_43540
108475 +request_resource_43548 request_resource 0 43548 NULL
108476 +rpc_malloc_43573 rpc_malloc 2 43573 NULL
108477 +handle_frequent_errors_43599 handle_frequent_errors 4 43599 NULL
108478 +lpfc_idiag_drbacc_read_reg_43606 lpfc_idiag_drbacc_read_reg 0-3 43606 NULL
108479 +proc_read_43614 proc_read 3 43614 NULL
108480 +i915_gem_execbuffer_relocate_object_slow_43618 i915_gem_execbuffer_relocate_object_slow 0 43618 NULL nohasharray
108481 +disable_dma_on_even_43618 disable_dma_on_even 0 43618 &i915_gem_execbuffer_relocate_object_slow_43618
108482 +alloc_thread_groups_43625 alloc_thread_groups 2 43625 NULL
108483 +random_write_43656 random_write 3 43656 NULL
108484 +bio_integrity_tag_43658 bio_integrity_tag 3 43658 NULL
108485 +ext4_acl_count_43659 ext4_acl_count 0-1 43659 NULL
108486 +fs_path_copy_43673 fs_path_copy 0 43673 NULL
108487 +dmam_declare_coherent_memory_43679 dmam_declare_coherent_memory 4 43679 NULL
108488 +calgary_map_page_43686 calgary_map_page 4 43686 NULL
108489 +max77693_bulk_write_43698 max77693_bulk_write 2-3 43698 NULL
108490 +wait_for_completion_interruptible_43723 wait_for_completion_interruptible 0 43723 NULL
108491 +drbd_md_first_sector_43729 drbd_md_first_sector 0 43729 NULL
108492 +reset_card_proc_43731 reset_card_proc 0 43731 NULL
108493 +snd_rme32_playback_copy_43732 snd_rme32_playback_copy 5 43732 NULL
108494 +ocfs2_replace_clusters_43733 ocfs2_replace_clusters 5-0 43733 NULL
108495 +fuse_conn_congestion_threshold_write_43736 fuse_conn_congestion_threshold_write 3 43736 NULL
108496 +__bm_find_next_43748 __bm_find_next 2 43748 NULL
108497 +gigaset_initcs_43753 gigaset_initcs 2 43753 NULL
108498 +sctp_setsockopt_active_key_43755 sctp_setsockopt_active_key 3 43755 NULL
108499 +ocfs2_xattr_get_value_outside_43787 ocfs2_xattr_get_value_outside 0 43787 NULL nohasharray
108500 +byte_pos_43787 byte_pos 0-2 43787 &ocfs2_xattr_get_value_outside_43787
108501 +btrfs_copy_from_user_43806 btrfs_copy_from_user 0-3-1 43806 NULL
108502 +ext4_read_block_bitmap_43814 ext4_read_block_bitmap 2 43814 NULL
108503 +div64_u64_safe_43815 div64_u64_safe 2-1 43815 NULL
108504 +ext4_split_extent_43818 ext4_split_extent 0 43818 NULL
108505 +i915_gem_execbuffer_relocate_entry_43822 i915_gem_execbuffer_relocate_entry 0 43822 NULL
108506 +ieee80211_if_fmt_element_ttl_43825 ieee80211_if_fmt_element_ttl 3 43825 NULL
108507 +ieee80211_alloc_hw_43829 ieee80211_alloc_hw 1 43829 NULL
108508 +read_flush_43851 read_flush 3 43851 NULL
108509 +ocfs2_block_group_find_clear_bits_43874 ocfs2_block_group_find_clear_bits 4 43874 NULL
108510 +pm860x_bulk_write_43875 pm860x_bulk_write 2-3 43875 NULL
108511 +SendString_43928 SendString 3 43928 NULL
108512 +xen_register_gsi_43946 xen_register_gsi 2-1 43946 NULL
108513 +stats_dot11RTSFailureCount_read_43948 stats_dot11RTSFailureCount_read 3 43948 NULL
108514 +perf_tp_event_43971 perf_tp_event 2 43971 NULL
108515 +__get_required_blob_size_43980 __get_required_blob_size 0-3-2 43980 NULL
108516 +nla_reserve_43984 nla_reserve 3 43984 NULL
108517 +__clkdev_alloc_43990 __clkdev_alloc 1 43990 NULL
108518 +scsi_command_size_43992 scsi_command_size 0 43992 NULL nohasharray
108519 +kvm_read_guest_virt_43992 kvm_read_guest_virt 4-2 43992 &scsi_command_size_43992 nohasharray
108520 +bcm_recvmsg_43992 bcm_recvmsg 4 43992 &kvm_read_guest_virt_43992
108521 +ulist_add_43994 ulist_add 0 43994 NULL
108522 +emit_flags_44006 emit_flags 4-3 44006 NULL
108523 +write_flush_procfs_44011 write_flush_procfs 3 44011 NULL
108524 +fru_strlen_44046 fru_strlen 0 44046 NULL
108525 +tree_insert_offset_44069 tree_insert_offset 0 44069 NULL
108526 +SYSC_add_key_44079 SYSC_add_key 4 44079 NULL
108527 +btrfs_prev_leaf_44083 btrfs_prev_leaf 0 44083 NULL nohasharray
108528 +load_discard_44083 load_discard 3 44083 &btrfs_prev_leaf_44083
108529 +__vxge_hw_vpath_tim_configure_44093 __vxge_hw_vpath_tim_configure 2 44093 NULL
108530 +xlog_recover_add_to_cont_trans_44102 xlog_recover_add_to_cont_trans 4 44102 NULL
108531 +skb_frag_dma_map_44112 skb_frag_dma_map 0 44112 NULL
108532 +tracing_set_trace_read_44122 tracing_set_trace_read 3 44122 NULL
108533 +hwif_to_node_44127 hwif_to_node 0 44127 NULL
108534 +SyS_process_vm_writev_44129 SyS_process_vm_writev 3-5 44129 NULL
108535 +vmw_gmr_bind_44130 vmw_gmr_bind 3 44130 NULL
108536 +lookup_extent_data_ref_44136 lookup_extent_data_ref 0 44136 NULL
108537 +ttm_get_pages_44142 ttm_get_pages 2 44142 NULL
108538 +scsi_get_resid_44147 scsi_get_resid 0 44147 NULL
108539 +ubifs_find_dirty_idx_leb_44169 ubifs_find_dirty_idx_leb 0 44169 NULL
108540 +ocfs2_xattr_bucket_find_44174 ocfs2_xattr_bucket_find 0 44174 NULL
108541 +SYSC_set_mempolicy_44176 SYSC_set_mempolicy 3 44176 NULL
108542 +readreg_ipac_44186 readreg_ipac 0 44186 NULL
108543 +handle_eviocgbit_44193 handle_eviocgbit 3 44193 NULL
108544 +IO_APIC_get_PCI_irq_vector_44198 IO_APIC_get_PCI_irq_vector 0 44198 NULL
108545 +__set_free_44211 __set_free 2 44211 NULL
108546 +claim_ptd_buffers_44213 claim_ptd_buffers 3 44213 NULL
108547 +srp_alloc_iu_44227 srp_alloc_iu 2 44227 NULL
108548 +ioapic_register_intr_44238 ioapic_register_intr 1 44238 NULL
108549 +scsi_track_queue_full_44239 scsi_track_queue_full 2 44239 NULL
108550 +sigma_action_write_regmap_44240 sigma_action_write_regmap 3 44240 NULL
108551 +tc3589x_gpio_irq_map_44245 tc3589x_gpio_irq_map 2 44245 NULL
108552 +apei_resources_sub_44252 apei_resources_sub 0 44252 NULL
108553 +byt_gpio_irq_map_44275 byt_gpio_irq_map 2 44275 NULL
108554 +device_create_file_44285 device_create_file 0 44285 NULL
108555 +ufs_clusteracct_44293 ufs_clusteracct 3 44293 NULL
108556 +ocfs2_zero_range_for_truncate_44294 ocfs2_zero_range_for_truncate 3 44294 NULL
108557 +ath6kl_keepalive_read_44303 ath6kl_keepalive_read 3 44303 NULL
108558 +bitmap_scnprintf_44318 bitmap_scnprintf 0-2 44318 NULL
108559 +dispatch_proc_write_44320 dispatch_proc_write 3 44320 NULL
108560 +rs_init_44327 rs_init 1 44327 NULL
108561 +create_free_space_inode_44332 create_free_space_inode 0 44332 NULL
108562 +insert_into_bitmap_44340 insert_into_bitmap 0 44340 NULL
108563 +ubi_eba_write_leb_st_44343 ubi_eba_write_leb_st 0-5 44343 NULL
108564 +radix_tree_maybe_preload_44346 radix_tree_maybe_preload 0 44346 NULL
108565 +nfs_fscache_get_super_cookie_44355 nfs_fscache_get_super_cookie 3 44355 NULL nohasharray
108566 +blk_queue_init_tags_44355 blk_queue_init_tags 2 44355 &nfs_fscache_get_super_cookie_44355
108567 +__is_discarded_44359 __is_discarded 2 44359 NULL
108568 +ipx_recvmsg_44366 ipx_recvmsg 4 44366 NULL
108569 +alloc_requests_44372 alloc_requests 0 44372 NULL
108570 +rts_threshold_read_44384 rts_threshold_read 3 44384 NULL
108571 +iterate_dir_item_44386 iterate_dir_item 0 44386 NULL
108572 +mtip_hw_read_flags_44396 mtip_hw_read_flags 3 44396 NULL
108573 +aoedev_flush_44398 aoedev_flush 2 44398 NULL
108574 +strlcpy_44400 strlcpy 3 44400 NULL
108575 +drm_buffer_alloc_44405 drm_buffer_alloc 2 44405 NULL
108576 +osst_do_scsi_44410 osst_do_scsi 4 44410 NULL
108577 +check_user_page_hwpoison_44412 check_user_page_hwpoison 1 44412 NULL
108578 +ieee80211_if_read_rc_rateidx_mcs_mask_5ghz_44423 ieee80211_if_read_rc_rateidx_mcs_mask_5ghz 3 44423 NULL
108579 +iwl_dbgfs_bf_params_write_44450 iwl_dbgfs_bf_params_write 3 44450 NULL
108580 +write_file_debug_44476 write_file_debug 3 44476 NULL
108581 +btrfs_chunk_item_size_44478 btrfs_chunk_item_size 0-1 44478 NULL
108582 +sdio_align_size_44489 sdio_align_size 0-2 44489 NULL
108583 +bio_advance_44496 bio_advance 2 44496 NULL
108584 +ieee80211_if_read_dropped_frames_ttl_44500 ieee80211_if_read_dropped_frames_ttl 3 44500 NULL
108585 +ac_register_board_44504 ac_register_board 3 44504 NULL
108586 +security_getprocattr_44505 security_getprocattr 0 44505 NULL nohasharray
108587 +iwl_dbgfs_sram_read_44505 iwl_dbgfs_sram_read 3 44505 &security_getprocattr_44505
108588 +spidev_write_44510 spidev_write 3 44510 NULL
108589 +SyS_io_getevents_44519 SyS_io_getevents 3 44519 NULL
108590 +ieee80211_rx_mgmt_assoc_resp_44525 ieee80211_rx_mgmt_assoc_resp 3 44525 NULL
108591 +comm_write_44537 comm_write 3 44537 NULL
108592 +xfs_log_calc_unit_res_44540 xfs_log_calc_unit_res 0-2 44540 NULL
108593 +snd_pcm_drop_44542 snd_pcm_drop 0 44542 NULL
108594 +dbg_chk_pnode_44555 dbg_chk_pnode 0 44555 NULL
108595 +shmem_getpage_gfp_44556 shmem_getpage_gfp 0 44556 NULL
108596 +hash_ipport4_expire_44564 hash_ipport4_expire 3 44564 NULL
108597 +dgrp_config_proc_write_44571 dgrp_config_proc_write 3 44571 NULL
108598 +snd_pcm_alloc_vmalloc_buffer_44595 snd_pcm_alloc_vmalloc_buffer 2 44595 NULL
108599 +btrfs_qgroup_account_ref_44604 btrfs_qgroup_account_ref 0 44604 NULL
108600 +huge_page_shift_44618 huge_page_shift 0 44618 NULL
108601 +sysfs_add_one_44629 sysfs_add_one 0 44629 NULL
108602 +msb_get_free_block_44633 msb_get_free_block 2 44633 NULL
108603 +ext2_new_block_44645 ext2_new_block 2-0 44645 NULL
108604 +alloc_ctrl_packet_44667 alloc_ctrl_packet 1 44667 NULL
108605 +mpi_resize_44674 mpi_resize 2 44674 NULL
108606 +sysfs_create_link_44685 sysfs_create_link 0 44685 NULL nohasharray
108607 +__copy_user_intel_nocache_44685 __copy_user_intel_nocache 0-3 44685 &sysfs_create_link_44685
108608 +ts_read_44687 ts_read 3 44687 NULL
108609 +qib_get_user_pages_44689 qib_get_user_pages 1-2 44689 NULL
108610 +lov_emerg_alloc_44698 lov_emerg_alloc 1 44698 NULL
108611 +__ocfs2_rotate_tree_left_44705 __ocfs2_rotate_tree_left 0 44705 NULL
108612 +xfer_to_user_44713 xfer_to_user 3 44713 NULL nohasharray
108613 +__generic_block_fiemap_44713 __generic_block_fiemap 4 44713 &xfer_to_user_44713
108614 +_zd_iowrite32v_locked_44725 _zd_iowrite32v_locked 3 44725 NULL
108615 +clusterip_proc_write_44729 clusterip_proc_write 3 44729 NULL
108616 +fib_count_nexthops_44730 fib_count_nexthops 0 44730 NULL
108617 +SyS_read_44732 SyS_read 3 44732 NULL
108618 +vm_insert_mixed_44738 vm_insert_mixed 3 44738 NULL
108619 +key_tx_rx_count_read_44742 key_tx_rx_count_read 3 44742 NULL
108620 +set_brk_44749 set_brk 1-2 44749 NULL
108621 +tnode_new_44757 tnode_new 3 44757 NULL nohasharray
108622 +pty_write_44757 pty_write 3 44757 &tnode_new_44757
108623 +__videobuf_copy_stream_44769 __videobuf_copy_stream 4-0 44769 NULL
108624 +handsfree_ramp_44777 handsfree_ramp 2 44777 NULL
108625 +irq_domain_add_legacy_44781 irq_domain_add_legacy 4-2 44781 NULL
108626 +sctp_setsockopt_44788 sctp_setsockopt 5 44788 NULL
108627 +rx_dropped_read_44799 rx_dropped_read 3 44799 NULL
108628 +qla4xxx_alloc_work_44813 qla4xxx_alloc_work 2 44813 NULL
108629 +mei_cl_read_start_44824 mei_cl_read_start 2 44824 NULL
108630 +rmap_write_protect_44833 rmap_write_protect 2 44833 NULL
108631 +sisusb_write_44834 sisusb_write 3 44834 NULL
108632 +kvm_read_hva_44847 kvm_read_hva 3 44847 NULL
108633 +cubic_root_44848 cubic_root 1 44848 NULL
108634 +qib_verbs_send_dma_44850 qib_verbs_send_dma 6 44850 NULL
108635 +copydesc_user_44855 copydesc_user 3 44855 NULL
108636 +i915_gem_execbuffer_relocate_44868 i915_gem_execbuffer_relocate 0 44868 NULL
108637 +init_rs_44873 init_rs 1 44873 NULL
108638 +skb_availroom_44883 skb_availroom 0 44883 NULL
108639 +ocfs2_wait_for_mask_44893 ocfs2_wait_for_mask 0 44893 NULL
108640 +do_tty_write_44896 do_tty_write 5 44896 NULL
108641 +regmap_spi_read_44921 regmap_spi_read 3-5 44921 NULL
108642 +_snd_pcm_hw_param_last_44947 _snd_pcm_hw_param_last 0 44947 NULL
108643 +__add_delayed_refs_44977 __add_delayed_refs 0 44977 NULL
108644 +tx_queue_status_read_44978 tx_queue_status_read 3 44978 NULL
108645 +map_index_to_lba_44993 map_index_to_lba 0-1 44993 NULL
108646 +send_chmod_45011 send_chmod 0 45011 NULL
108647 +i915_gem_do_execbuffer_45012 i915_gem_do_execbuffer 0 45012 NULL
108648 +bytepos_delta_45017 bytepos_delta 0-2 45017 NULL
108649 +read_block_bitmap_45021 read_block_bitmap 2 45021 NULL nohasharray
108650 +ptrace_writedata_45021 ptrace_writedata 4-3 45021 &read_block_bitmap_45021
108651 +dm_kvzalloc_45025 dm_kvzalloc 1-2 45025 NULL
108652 +vhci_get_user_45039 vhci_get_user 3 45039 NULL
108653 +sysfs_do_create_link_sd_45057 sysfs_do_create_link_sd 0 45057 NULL
108654 +remap_oldmem_pfn_range_45060 remap_oldmem_pfn_range 2-3-4 45060 NULL nohasharray
108655 +sel_write_user_45060 sel_write_user 3 45060 &remap_oldmem_pfn_range_45060
108656 +vmscan_swappiness_45062 vmscan_swappiness 0 45062 NULL
108657 +snd_mixart_BA0_read_45069 snd_mixart_BA0_read 5 45069 NULL
108658 +kvm_mmu_page_get_gfn_45110 kvm_mmu_page_get_gfn 0-2 45110 NULL
108659 +pwr_missing_bcns_cnt_read_45113 pwr_missing_bcns_cnt_read 3 45113 NULL
108660 +usbdev_read_45114 usbdev_read 3 45114 NULL
108661 +send_to_tty_45141 send_to_tty 3 45141 NULL
108662 +stmpe_irq_map_45146 stmpe_irq_map 2 45146 NULL
108663 +cfs_trace_daemon_command_usrstr_45147 cfs_trace_daemon_command_usrstr 2 45147 NULL
108664 +gen_bitmask_string_45149 gen_bitmask_string 6 45149 NULL
108665 +ocfs2_remove_inode_range_45156 ocfs2_remove_inode_range 3-4 45156 NULL nohasharray
108666 +device_write_45156 device_write 3 45156 &ocfs2_remove_inode_range_45156
108667 +ocfs2_dq_frozen_trigger_45159 ocfs2_dq_frozen_trigger 4 45159 NULL
108668 +SYSC_write_45160 SYSC_write 3 45160 NULL
108669 +tomoyo_write_self_45161 tomoyo_write_self 3 45161 NULL
108670 +sta_agg_status_write_45164 sta_agg_status_write 3 45164 NULL
108671 +sctp_pack_cookie_45190 sctp_pack_cookie 6 45190 NULL nohasharray
108672 +snd_sb_csp_load_user_45190 snd_sb_csp_load_user 3 45190 &sctp_pack_cookie_45190
108673 +num_clusters_in_group_45194 num_clusters_in_group 2 45194 NULL
108674 +__radix_tree_preload_45197 __radix_tree_preload 0 45197 NULL
108675 +iso_alloc_urb_45206 iso_alloc_urb 4-5 45206 NULL
108676 +spi_alloc_master_45223 spi_alloc_master 2 45223 NULL
108677 +__dirty_45228 __dirty 2 45228 NULL
108678 +ieee80211_if_read_peer_45233 ieee80211_if_read_peer 3 45233 NULL
108679 +event_enable_write_45238 event_enable_write 3 45238 NULL
108680 +prism2_pda_proc_read_45246 prism2_pda_proc_read 3 45246 NULL
108681 +input_mt_init_slots_45279 input_mt_init_slots 2 45279 NULL
108682 +gfs2_fiemap_45282 gfs2_fiemap 4 45282 NULL
108683 +snd_pcm_oss_sync1_45298 snd_pcm_oss_sync1 2 45298 NULL
108684 +e1000_tx_map_45309 e1000_tx_map 5 45309 NULL
108685 +pte_val_45313 pte_val 0 45313 NULL
108686 +copy_vm86_regs_from_user_45340 copy_vm86_regs_from_user 3 45340 NULL
108687 +null_alloc_repbuf_45375 null_alloc_repbuf 3 45375 NULL
108688 +sock_recv_errqueue_45412 sock_recv_errqueue 3 45412 NULL
108689 +paging64_gva_to_gpa_45421 paging64_gva_to_gpa 2 45421 NULL nohasharray
108690 +ieee80211_if_fmt_dot11MeshHWMProotInterval_45421 ieee80211_if_fmt_dot11MeshHWMProotInterval 3 45421 &paging64_gva_to_gpa_45421
108691 +alloc_task_struct_node_45426 alloc_task_struct_node 1 45426 NULL
108692 +ll_iocontrol_register_45430 ll_iocontrol_register 2 45430 NULL
108693 +tty_buffer_alloc_45437 tty_buffer_alloc 2 45437 NULL
108694 +SYSC_mremap_45446 SYSC_mremap 5-1-2-3 45446 NULL
108695 +__node_remap_45458 __node_remap 4 45458 NULL
108696 +rds_ib_set_wr_signal_state_45463 rds_ib_set_wr_signal_state 0 45463 NULL
108697 +tracing_read_dyn_info_45468 tracing_read_dyn_info 3 45468 NULL
108698 +snd_pcm_hwsync_45479 snd_pcm_hwsync 0 45479 NULL
108699 +arizona_init_fll_45503 arizona_init_fll 5 45503 NULL
108700 +rds_message_copy_from_user_45510 rds_message_copy_from_user 3 45510 NULL
108701 +i40e_alloc_vfs_45511 i40e_alloc_vfs 2 45511 NULL
108702 +__split_vma_45522 __split_vma 0 45522 NULL
108703 +cgroup_read_u64_45532 cgroup_read_u64 5 45532 NULL
108704 +copy_macs_45534 copy_macs 4 45534 NULL
108705 +nla_attr_size_45545 nla_attr_size 0-1 45545 NULL
108706 +v9fs_direct_read_45546 v9fs_direct_read 3 45546 NULL
108707 +cx18_copy_mdl_to_user_45549 cx18_copy_mdl_to_user 4 45549 NULL
108708 +ext3_group_first_block_no_45555 ext3_group_first_block_no 0-2 45555 NULL
108709 +stats_dot11ACKFailureCount_read_45558 stats_dot11ACKFailureCount_read 3 45558 NULL
108710 +_regmap_bus_raw_write_45559 _regmap_bus_raw_write 2 45559 NULL
108711 +posix_acl_xattr_size_45561 posix_acl_xattr_size 0-1 45561 NULL
108712 +venus_rmdir_45564 venus_rmdir 4 45564 NULL
108713 +btrfs_find_free_ino_45585 btrfs_find_free_ino 0 45585 NULL
108714 +ipath_create_cq_45586 ipath_create_cq 2 45586 NULL
108715 +ath6kl_keepalive_write_45600 ath6kl_keepalive_write 3 45600 NULL
108716 +hidraw_get_report_45609 hidraw_get_report 3 45609 NULL
108717 +commit_cowonly_roots_45640 commit_cowonly_roots 0 45640 NULL
108718 +ebitmap_next_positive_45651 ebitmap_next_positive 3-0 45651 NULL
108719 +ext4_reserve_inode_write_45654 ext4_reserve_inode_write 0 45654 NULL
108720 +dma_map_cont_45668 dma_map_cont 5 45668 NULL
108721 +compat_mpctl_ioctl_45671 compat_mpctl_ioctl 2 45671 NULL
108722 +dgram_sendmsg_45679 dgram_sendmsg 4 45679 NULL
108723 +smk_write_ambient_45691 smk_write_ambient 3 45691 NULL
108724 +unix_dgram_sendmsg_45699 unix_dgram_sendmsg 4 45699 NULL nohasharray
108725 +bscnl_emit_45699 bscnl_emit 0-2-5 45699 &unix_dgram_sendmsg_45699
108726 +sg_proc_write_adio_45704 sg_proc_write_adio 3 45704 NULL
108727 +snd_cs46xx_io_read_45734 snd_cs46xx_io_read 5 45734 NULL nohasharray
108728 +task_cgroup_path_45734 task_cgroup_path 3 45734 &snd_cs46xx_io_read_45734
108729 +rw_copy_check_uvector_45748 rw_copy_check_uvector 3-0 45748 NULL nohasharray
108730 +v4l2_ctrl_new_std_45748 v4l2_ctrl_new_std 5 45748 &rw_copy_check_uvector_45748
108731 +lkdtm_debugfs_read_45752 lkdtm_debugfs_read 3 45752 NULL
108732 +btrfs_delete_delayed_dir_index_45757 btrfs_delete_delayed_dir_index 0 45757 NULL
108733 +alloc_ts_config_45775 alloc_ts_config 1 45775 NULL
108734 +osc_checksum_type_seq_write_45785 osc_checksum_type_seq_write 3 45785 NULL
108735 +raw_setsockopt_45800 raw_setsockopt 5 45800 NULL
108736 +rds_tcp_inc_copy_to_user_45804 rds_tcp_inc_copy_to_user 3 45804 NULL
108737 +lbs_rdbbp_read_45805 lbs_rdbbp_read 3 45805 NULL
108738 +pcpu_alloc_alloc_info_45813 pcpu_alloc_alloc_info 1-2 45813 NULL
108739 +ll_max_readahead_mb_seq_write_45815 ll_max_readahead_mb_seq_write 3 45815 NULL
108740 +fm_v4l2_init_video_device_45821 fm_v4l2_init_video_device 2 45821 NULL
108741 +memcg_update_cache_size_45828 memcg_update_cache_size 2 45828 NULL
108742 +ipv6_recv_rxpmtu_45830 ipv6_recv_rxpmtu 3 45830 NULL
108743 +task_state_char_45839 task_state_char 1 45839 NULL
108744 +__ip_select_ident_45851 __ip_select_ident 3 45851 NULL
108745 +x509_process_extension_45854 x509_process_extension 5 45854 NULL
108746 +efx_tx_queue_insert_45859 efx_tx_queue_insert 2 45859 NULL
108747 +isdn_write_45863 isdn_write 3 45863 NULL
108748 +unpack_orig_pfns_45867 unpack_orig_pfns 0 45867 NULL
108749 +tpm_config_in_45880 tpm_config_in 0 45880 NULL
108750 +get_rdac_req_45882 get_rdac_req 3 45882 NULL
108751 +ocfs2_xattr_block_find_45891 ocfs2_xattr_block_find 0 45891 NULL
108752 +online_page_cgroup_45894 online_page_cgroup 3 45894 NULL
108753 +cfs_cpt_weight_45903 cfs_cpt_weight 0 45903 NULL
108754 +wm_adsp_region_to_reg_45915 wm_adsp_region_to_reg 0-2 45915 NULL
108755 +dbgfs_frame_45917 dbgfs_frame 3 45917 NULL
108756 +alloc_mr_45935 alloc_mr 1 45935 NULL
108757 +kmem_cache_alloc_node_trace_45968 kmem_cache_alloc_node_trace 3 45968 NULL
108758 +copy_to_45969 copy_to 3 45969 NULL
108759 +rb_simple_read_45972 rb_simple_read 3 45972 NULL
108760 +ioat2_dca_count_dca_slots_45984 ioat2_dca_count_dca_slots 0 45984 NULL
108761 +ore_calc_stripe_info_46023 ore_calc_stripe_info 2 46023 NULL
108762 +sierra_setup_urb_46029 sierra_setup_urb 5 46029 NULL
108763 +get_free_entries_46030 get_free_entries 1 46030 NULL
108764 +__access_remote_vm_46031 __access_remote_vm 0-3-5 46031 NULL
108765 +snd_emu10k1x_ptr_read_46049 snd_emu10k1x_ptr_read 0 46049 NULL
108766 +acpi_register_gsi_xen_hvm_46052 acpi_register_gsi_xen_hvm 2 46052 NULL
108767 +__ocfs2_move_extent_46060 __ocfs2_move_extent 5-6-4-0-3 46060 NULL nohasharray
108768 +dma_tx_errors_read_46060 dma_tx_errors_read 3 46060 &__ocfs2_move_extent_46060
108769 +sel_commit_bools_write_46077 sel_commit_bools_write 3 46077 NULL
108770 +arizona_set_irq_wake_46101 arizona_set_irq_wake 2 46101 NULL
108771 +memcg_update_array_size_46111 memcg_update_array_size 1 46111 NULL nohasharray
108772 +il3945_ucode_general_stats_read_46111 il3945_ucode_general_stats_read 3 46111 &memcg_update_array_size_46111
108773 +C_SYSC_writev_46113 C_SYSC_writev 3 46113 NULL
108774 +mlx4_ib_alloc_fast_reg_page_list_46119 mlx4_ib_alloc_fast_reg_page_list 2 46119 NULL
108775 +paging32_walk_addr_nested_46121 paging32_walk_addr_nested 3 46121 NULL
108776 +replay_dir_deletes_46136 replay_dir_deletes 0 46136 NULL
108777 +rtw_buf_update_46138 rtw_buf_update 4 46138 NULL
108778 +vb2_dma_sg_get_userptr_46146 vb2_dma_sg_get_userptr 2-3 46146 NULL
108779 +__netlink_change_ngroups_46156 __netlink_change_ngroups 2 46156 NULL
108780 +alloc_iova_46160 alloc_iova 2 46160 NULL
108781 +twl_direction_out_46182 twl_direction_out 2 46182 NULL
108782 +vxge_os_dma_malloc_46184 vxge_os_dma_malloc 2 46184 NULL
108783 +fq_resize_46195 fq_resize 2 46195 NULL
108784 +add_conn_list_46197 add_conn_list 3-0 46197 NULL
108785 +i2400m_op_msg_from_user_46213 i2400m_op_msg_from_user 4 46213 NULL
108786 +tm6000_i2c_recv_regs_46215 tm6000_i2c_recv_regs 5 46215 NULL
108787 +dsp_write_46218 dsp_write 2 46218 NULL
108788 +xen_setup_msi_irqs_46245 xen_setup_msi_irqs 2 46245 NULL
108789 +mpi_read_raw_data_46248 mpi_read_raw_data 2 46248 NULL
108790 +ReadReg_46277 ReadReg 0 46277 NULL
108791 +sg_proc_write_dressz_46316 sg_proc_write_dressz 3 46316 NULL
108792 +__hwahc_dev_set_key_46328 __hwahc_dev_set_key 5 46328 NULL nohasharray
108793 +compat_SyS_readv_46328 compat_SyS_readv 3 46328 &__hwahc_dev_set_key_46328
108794 +iwl_dbgfs_chain_noise_read_46355 iwl_dbgfs_chain_noise_read 3 46355 NULL
108795 +smk_write_direct_46363 smk_write_direct 3 46363 NULL
108796 +__iommu_calculate_agaw_46366 __iommu_calculate_agaw 2 46366 NULL
108797 +ubi_dump_flash_46381 ubi_dump_flash 4 46381 NULL
108798 +mmap_ureg_46388 mmap_ureg 3 46388 NULL
108799 +vma_adjust_46390 vma_adjust 0 46390 NULL
108800 +fuse_file_aio_write_46399 fuse_file_aio_write 4 46399 NULL
108801 +crypto_ablkcipher_reqsize_46411 crypto_ablkcipher_reqsize 0 46411 NULL
108802 +ttm_page_pool_get_pages_46431 ttm_page_pool_get_pages 0-5 46431 NULL
108803 +cfs_power2_roundup_46433 cfs_power2_roundup 0-1 46433 NULL
108804 +hash_ipportip6_expire_46443 hash_ipportip6_expire 3 46443 NULL
108805 +cp210x_set_config_46447 cp210x_set_config 4 46447 NULL
108806 +parport_pc_fifo_write_block_46455 parport_pc_fifo_write_block 3 46455 NULL
108807 +il_dbgfs_clear_traffic_stats_write_46458 il_dbgfs_clear_traffic_stats_write 3 46458 NULL
108808 +filldir64_46469 filldir64 3 46469 NULL
108809 +ocfs2_reserve_clusters_with_limit_46479 ocfs2_reserve_clusters_with_limit 0 46479 NULL
108810 +fill_in_write_vector_46498 fill_in_write_vector 0 46498 NULL
108811 +p9pdu_vreadf_46500 p9pdu_vreadf 0 46500 NULL
108812 +pin_code_reply_46510 pin_code_reply 4 46510 NULL
108813 +mthca_alloc_cq_buf_46512 mthca_alloc_cq_buf 3 46512 NULL
108814 +nouveau_drm_ioctl_46514 nouveau_drm_ioctl 2 46514 NULL nohasharray
108815 +kmsg_read_46514 kmsg_read 3 46514 &nouveau_drm_ioctl_46514
108816 +bdx_rxdb_create_46525 bdx_rxdb_create 1 46525 NULL
108817 +nl80211_send_rx_assoc_46538 nl80211_send_rx_assoc 4 46538 NULL
108818 +pm860x_irq_domain_map_46553 pm860x_irq_domain_map 2 46553 NULL
108819 +link_send_sections_long_46556 link_send_sections_long 4 46556 NULL
108820 +dn_current_mss_46574 dn_current_mss 0 46574 NULL nohasharray
108821 +blk_flush_complete_seq_46574 blk_flush_complete_seq 2 46574 &dn_current_mss_46574
108822 +serverworks_create_gatt_pages_46582 serverworks_create_gatt_pages 1 46582 NULL
108823 +snd_compr_write_data_46592 snd_compr_write_data 3 46592 NULL
108824 +il3945_stats_flag_46606 il3945_stats_flag 0-3 46606 NULL
108825 +vscnprintf_46617 vscnprintf 0-2 46617 NULL
108826 +__kfifo_out_r_46623 __kfifo_out_r 3-0 46623 NULL
108827 +request_key_async_with_auxdata_46624 request_key_async_with_auxdata 4 46624 NULL
108828 +pci_enable_device_46642 pci_enable_device 0 46642 NULL
108829 +vfs_getxattr_alloc_46649 vfs_getxattr_alloc 0 46649 NULL
108830 +av7110_ipack_init_46655 av7110_ipack_init 2 46655 NULL
108831 +e1000_tx_map_46672 e1000_tx_map 4 46672 NULL
108832 +alloc_data_packet_46698 alloc_data_packet 1 46698 NULL
108833 +__ilog2_u32_46706 __ilog2_u32 0 46706 NULL
108834 +erst_dbg_write_46715 erst_dbg_write 3 46715 NULL
108835 +ide_read_status_46719 ide_read_status 0 46719 NULL
108836 +wl1271_rx_filter_alloc_field_46721 wl1271_rx_filter_alloc_field 5 46721 NULL
108837 +irq_domain_add_simple_46734 irq_domain_add_simple 2-3 46734 NULL
108838 +ext4_count_free_46754 ext4_count_free 2 46754 NULL nohasharray
108839 +pte_pfn_46754 pte_pfn 0 46754 &ext4_count_free_46754
108840 +ntfs2utc_46762 ntfs2utc 1 46762 NULL
108841 +hest_ghes_dev_register_46766 hest_ghes_dev_register 1 46766 NULL
108842 +int_hw_irq_en_46776 int_hw_irq_en 3 46776 NULL
108843 +regcache_lzo_sync_46777 regcache_lzo_sync 2 46777 NULL
108844 +_xfs_buf_get_pages_46811 _xfs_buf_get_pages 2 46811 NULL
108845 +btrfs_insert_empty_items_46822 btrfs_insert_empty_items 0 46822 NULL
108846 +xfs_iroot_realloc_46826 xfs_iroot_realloc 2 46826 NULL
108847 +shmem_pwrite_fast_46842 shmem_pwrite_fast 3 46842 NULL
108848 +readreg_46845 readreg 0 46845 NULL
108849 +spi_async_46857 spi_async 0 46857 NULL
108850 +vsnprintf_46863 vsnprintf 0 46863 NULL nohasharray
108851 +SyS_move_pages_46863 SyS_move_pages 2 46863 &vsnprintf_46863
108852 +nvme_alloc_queue_46865 nvme_alloc_queue 3 46865 NULL
108853 +rvmalloc_46873 rvmalloc 1 46873 NULL
108854 +qp_memcpy_from_queue_iov_46874 qp_memcpy_from_queue_iov 5-4 46874 NULL
108855 +lov_iocontrol_46876 lov_iocontrol 3 46876 NULL
108856 +stmpe_gpio_irq_unmap_46884 stmpe_gpio_irq_unmap 2 46884 NULL
108857 +ixgbe_dbg_reg_ops_write_46895 ixgbe_dbg_reg_ops_write 3 46895 NULL
108858 +sk_mem_pages_46896 sk_mem_pages 0-1 46896 NULL
108859 +ol_dqblk_off_46904 ol_dqblk_off 2-3 46904 NULL
108860 +ieee80211_if_fmt_power_mode_46906 ieee80211_if_fmt_power_mode 3 46906 NULL
108861 +wlcore_alloc_hw_46917 wlcore_alloc_hw 1-3 46917 NULL
108862 +fb_write_46924 fb_write 3 46924 NULL
108863 +i915_gem_check_olr_46925 i915_gem_check_olr 0 46925 NULL
108864 +kvm_register_read_46948 kvm_register_read 0 46948 NULL
108865 +__sctp_setsockopt_connectx_46949 __sctp_setsockopt_connectx 3 46949 NULL
108866 +qla4xxx_post_aen_work_46953 qla4xxx_post_aen_work 3 46953 NULL
108867 +SYSC_poll_46965 SYSC_poll 2 46965 NULL
108868 +crypto_tfm_alg_alignmask_46971 crypto_tfm_alg_alignmask 0 46971 NULL
108869 +mgmt_pending_add_46976 mgmt_pending_add 5 46976 NULL
108870 +strlcat_46985 strlcat 3 46985 NULL
108871 +bitmap_file_clear_bit_46990 bitmap_file_clear_bit 2 46990 NULL
108872 +gfs2_xattr_system_set_46996 gfs2_xattr_system_set 4 46996 NULL nohasharray
108873 +sel_write_bool_46996 sel_write_bool 3 46996 &gfs2_xattr_system_set_46996
108874 +ttm_bo_io_47000 ttm_bo_io 5 47000 NULL
108875 +blk_rq_map_kern_47004 blk_rq_map_kern 4 47004 NULL
108876 +__map_single_47020 __map_single 3-4-7-0 47020 NULL
108877 +cx231xx_init_bulk_47024 cx231xx_init_bulk 3-2-4 47024 NULL
108878 +fs_path_len_47060 fs_path_len 0 47060 NULL
108879 +ufs_new_fragments_47070 ufs_new_fragments 4-3-5 47070 NULL nohasharray
108880 +ext4_xattr_list_entries_47070 ext4_xattr_list_entries 0-4 47070 &ufs_new_fragments_47070
108881 +pipeline_dec_packet_in_read_47076 pipeline_dec_packet_in_read 3 47076 NULL
108882 +scsi_deactivate_tcq_47086 scsi_deactivate_tcq 2 47086 NULL
108883 +iwl_dump_nic_event_log_47089 iwl_dump_nic_event_log 0 47089 NULL
108884 +ptlrpc_lprocfs_threads_max_seq_write_47104 ptlrpc_lprocfs_threads_max_seq_write 3 47104 NULL
108885 +tboot_log_read_47110 tboot_log_read 3 47110 NULL
108886 +mousedev_read_47123 mousedev_read 3 47123 NULL
108887 +ubi_compare_lebs_47141 ubi_compare_lebs 0 47141 NULL
108888 +ses_recv_diag_47143 ses_recv_diag 4 47143 NULL nohasharray
108889 +acpi_ut_initialize_buffer_47143 acpi_ut_initialize_buffer 2 47143 &ses_recv_diag_47143
108890 +mxms_headerlen_47161 mxms_headerlen 0 47161 NULL
108891 +rs_sta_dbgfs_rate_scale_data_read_47165 rs_sta_dbgfs_rate_scale_data_read 3 47165 NULL
108892 +alloc_cpumask_var_node_47167 alloc_cpumask_var_node 3 47167 NULL
108893 +bpf_alloc_binary_47170 bpf_alloc_binary 1 47170 NULL
108894 +rts51x_ms_rw_47171 rts51x_ms_rw 3-4 47171 NULL
108895 +btrfs_del_inode_ref_47181 btrfs_del_inode_ref 0 47181 NULL
108896 +can_set_system_xattr_47182 can_set_system_xattr 4 47182 NULL
108897 +options_write_47243 options_write 3 47243 NULL
108898 +portcntrs_1_read_47253 portcntrs_1_read 3 47253 NULL
108899 +da9052_disable_irq_nosync_47260 da9052_disable_irq_nosync 2 47260 NULL
108900 +p9pdu_readf_47269 p9pdu_readf 0 47269 NULL
108901 +ablkcipher_next_slow_47274 ablkcipher_next_slow 3-4 47274 NULL
108902 +gfs2_readpages_47285 gfs2_readpages 4 47285 NULL
108903 +vsnprintf_47291 vsnprintf 0 47291 NULL
108904 +tx_internal_desc_overflow_read_47300 tx_internal_desc_overflow_read 3 47300 NULL
108905 +iterate_inode_ref_47322 iterate_inode_ref 0 47322 NULL
108906 +SyS_madvise_47354 SyS_madvise 1-2 47354 NULL
108907 +ieee80211_if_read_dot11MeshHoldingTimeout_47356 ieee80211_if_read_dot11MeshHoldingTimeout 3 47356 NULL
108908 +avc_get_hash_stats_47359 avc_get_hash_stats 0 47359 NULL
108909 +find_first_zero_bit_le_47369 find_first_zero_bit_le 2 47369 NULL
108910 +__bio_map_kern_47379 __bio_map_kern 3 47379 NULL
108911 +nv_rd32_47390 nv_rd32 0 47390 NULL nohasharray
108912 +trace_options_core_read_47390 trace_options_core_read 3 47390 &nv_rd32_47390
108913 +nametbl_list_47391 nametbl_list 2 47391 NULL
108914 +dgrp_net_write_47392 dgrp_net_write 3 47392 NULL
108915 +pfkey_sendmsg_47394 pfkey_sendmsg 4 47394 NULL
108916 +gfn_to_pfn_prot_47398 gfn_to_pfn_prot 2 47398 NULL
108917 +lbs_wrmac_write_47400 lbs_wrmac_write 3 47400 NULL
108918 +ocfs2_resv_end_47408 ocfs2_resv_end 0 47408 NULL
108919 +sta_vht_capa_read_47409 sta_vht_capa_read 3 47409 NULL
108920 +crypto_ablkcipher_alignmask_47410 crypto_ablkcipher_alignmask 0 47410 NULL
108921 +lbs_wrrf_write_47418 lbs_wrrf_write 3 47418 NULL
108922 +vzalloc_47421 vzalloc 1 47421 NULL
108923 +hash_ipportip4_expire_47426 hash_ipportip4_expire 3 47426 NULL
108924 +posix_acl_from_disk_47445 posix_acl_from_disk 2 47445 NULL
108925 +numaq_apicid_to_cpu_present_47449 numaq_apicid_to_cpu_present 1 47449 NULL
108926 +environ_read_47451 environ_read 3 47451 NULL
108927 +__load_mapping_47460 __load_mapping 2 47460 NULL
108928 +try_lock_extent_47465 try_lock_extent 0 47465 NULL
108929 +nvme_trans_send_fw_cmd_47479 nvme_trans_send_fw_cmd 4 47479 NULL
108930 +wb_force_mapping_47485 wb_force_mapping 2 47485 NULL nohasharray
108931 +newpart_47485 newpart 6-4 47485 &wb_force_mapping_47485
108932 +mcp23s17_read_regs_47491 mcp23s17_read_regs 4 47491 NULL
108933 +core_sys_select_47494 core_sys_select 1 47494 NULL
108934 +read_block_for_search_47502 read_block_for_search 0 47502 NULL
108935 +alloc_arraycache_47505 alloc_arraycache 2-1 47505 NULL nohasharray
108936 +btrfs_init_inode_security_47505 btrfs_init_inode_security 0 47505 &alloc_arraycache_47505
108937 +unlink_simple_47506 unlink_simple 3 47506 NULL
108938 +pstore_decompress_47510 pstore_decompress 0 47510 NULL
108939 +ufs_inode_getblock_47512 ufs_inode_getblock 4 47512 NULL
108940 +__proc_lnet_portal_rotor_47529 __proc_lnet_portal_rotor 5 47529 NULL
108941 +snd_pcm_resume_47530 snd_pcm_resume 0 47530 NULL
108942 +vscnprintf_47533 vscnprintf 0-2 47533 NULL nohasharray
108943 +process_vm_rw_47533 process_vm_rw 3-5 47533 &vscnprintf_47533
108944 +einj_check_trigger_header_47534 einj_check_trigger_header 0 47534 NULL
108945 +ieee80211_if_fmt_min_discovery_timeout_47539 ieee80211_if_fmt_min_discovery_timeout 3 47539 NULL
108946 +ocfs2_resv_find_window_47557 ocfs2_resv_find_window 3 47557 NULL
108947 +read_ldt_47570 read_ldt 2 47570 NULL
108948 +isku_sysfs_read_last_set_47572 isku_sysfs_read_last_set 6 47572 NULL
108949 +rpipe_get_idx_47579 rpipe_get_idx 2-0 47579 NULL
108950 +SYSC_fcntl64_47581 SYSC_fcntl64 3 47581 NULL
108951 +vendorextnReadSection_47583 vendorextnReadSection 0 47583 NULL
108952 +btrfs_stack_header_bytenr_47589 btrfs_stack_header_bytenr 0 47589 NULL
108953 +ext4_kvzalloc_47605 ext4_kvzalloc 1 47605 NULL
108954 +sctp_ssnmap_new_47608 sctp_ssnmap_new 2-1 47608 NULL
108955 +cache_read_pipefs_47615 cache_read_pipefs 3 47615 NULL
108956 +twl4030_clear_set_47624 twl4030_clear_set 4 47624 NULL
108957 +irq_set_chip_47638 irq_set_chip 1 47638 NULL
108958 +get_size_47644 get_size 1-2 47644 NULL
108959 +snd_pcm_info_47699 snd_pcm_info 0 47699 NULL
108960 +packet_recvmsg_47700 packet_recvmsg 4 47700 NULL nohasharray
108961 +ipath_format_hwmsg_47700 ipath_format_hwmsg 2 47700 &packet_recvmsg_47700
108962 +global_rt_runtime_47712 global_rt_runtime 0 47712 NULL
108963 +save_microcode_47717 save_microcode 3 47717 NULL
108964 +bits_to_user_47733 bits_to_user 3-2 47733 NULL
108965 +carl9170_debugfs_read_47738 carl9170_debugfs_read 3 47738 NULL
108966 +ir_prepare_write_buffer_47747 ir_prepare_write_buffer 3 47747 NULL
108967 +mvumi_alloc_mem_resource_47750 mvumi_alloc_mem_resource 3 47750 NULL
108968 +ext3_find_near_47752 ext3_find_near 0 47752 NULL
108969 +alloc_sched_domains_47756 alloc_sched_domains 1 47756 NULL
108970 +uwb_ie_dump_hex_47774 uwb_ie_dump_hex 4 47774 NULL
108971 +SyS_setgroups16_47780 SyS_setgroups16 1 47780 NULL
108972 +error_error_numll_frame_cts_start_read_47781 error_error_numll_frame_cts_start_read 3 47781 NULL
108973 +posix_acl_fix_xattr_from_user_47793 posix_acl_fix_xattr_from_user 2 47793 NULL
108974 +W6692_empty_Bfifo_47804 W6692_empty_Bfifo 2 47804 NULL
108975 +lov_packmd_47810 lov_packmd 0 47810 NULL
108976 +pinconf_dbg_config_write_47835 pinconf_dbg_config_write 3 47835 NULL
108977 +KEY_SIZE_47855 KEY_SIZE 0 47855 NULL
108978 +ubifs_unpack_nnode_47866 ubifs_unpack_nnode 0 47866 NULL
108979 +vhci_read_47878 vhci_read 3 47878 NULL
108980 +ubi_wl_put_peb_47886 ubi_wl_put_peb 0 47886 NULL
108981 +keyctl_instantiate_key_common_47889 keyctl_instantiate_key_common 4 47889 NULL
108982 +load_mapping_47904 load_mapping 3 47904 NULL
108983 +btrfs_duplicate_item_47910 btrfs_duplicate_item 0 47910 NULL
108984 +cfs_percpt_alloc_47918 cfs_percpt_alloc 2 47918 NULL
108985 +comedi_write_47926 comedi_write 3 47926 NULL
108986 +nvme_trans_get_blk_desc_len_47946 nvme_trans_get_blk_desc_len 0-2 47946 NULL
108987 +lp8788_irq_map_47964 lp8788_irq_map 2 47964 NULL
108988 +gether_get_ifname_47972 gether_get_ifname 3 47972 NULL
108989 +iwl_dbgfs_ucode_tracing_read_47983 iwl_dbgfs_ucode_tracing_read 3 47983 NULL nohasharray
108990 +mempool_resize_47983 mempool_resize 2 47983 &iwl_dbgfs_ucode_tracing_read_47983
108991 +dbg_port_buf_47990 dbg_port_buf 2 47990 NULL
108992 +ib_umad_write_47993 ib_umad_write 3 47993 NULL
108993 +ocfs2_find_refcount_split_pos_48001 ocfs2_find_refcount_split_pos 0 48001 NULL
108994 +lustre_cfg_len_48002 lustre_cfg_len 0 48002 NULL
108995 +gdm_tty_recv_complete_48011 gdm_tty_recv_complete 2 48011 NULL
108996 +ffs_epfile_write_48014 ffs_epfile_write 3 48014 NULL
108997 +bio_integrity_set_tag_48035 bio_integrity_set_tag 3 48035 NULL
108998 +pppoe_sendmsg_48039 pppoe_sendmsg 4 48039 NULL
108999 +SYSC_writev_48040 SYSC_writev 3 48040 NULL
109000 +btrfs_reserve_extent_48044 btrfs_reserve_extent 6 48044 NULL
109001 +wpan_phy_alloc_48056 wpan_phy_alloc 1 48056 NULL
109002 +ocfs2_change_refcount_rec_48059 ocfs2_change_refcount_rec 0 48059 NULL
109003 +posix_acl_alloc_48063 posix_acl_alloc 1 48063 NULL
109004 +palmas_bulk_write_48068 palmas_bulk_write 2-3-5 48068 NULL
109005 +disc_write_48070 disc_write 3 48070 NULL
109006 +mmc_alloc_host_48097 mmc_alloc_host 1 48097 NULL
109007 +skb_copy_datagram_const_iovec_48102 skb_copy_datagram_const_iovec 4-2-5-0 48102 NULL
109008 +radio_isa_common_probe_48107 radio_isa_common_probe 3 48107 NULL
109009 +vmw_framebuffer_surface_dirty_48132 vmw_framebuffer_surface_dirty 6 48132 NULL
109010 +set_discoverable_48141 set_discoverable 4 48141 NULL
109011 +dn_fib_count_nhs_48145 dn_fib_count_nhs 0 48145 NULL
109012 +get_cur_inode_state_48149 get_cur_inode_state 0 48149 NULL
109013 +_add_to_r4w_48152 _add_to_r4w 4 48152 NULL nohasharray
109014 +bitmap_onto_48152 bitmap_onto 4 48152 &_add_to_r4w_48152
109015 +isr_dma1_done_read_48159 isr_dma1_done_read 3 48159 NULL
109016 +c4iw_id_table_alloc_48163 c4iw_id_table_alloc 3 48163 NULL
109017 +ocfs2_find_next_zero_bit_unaligned_48170 ocfs2_find_next_zero_bit_unaligned 2-3 48170 NULL nohasharray
109018 +rbd_obj_method_sync_48170 rbd_obj_method_sync 8 48170 &ocfs2_find_next_zero_bit_unaligned_48170
109019 +alloc_cc770dev_48186 alloc_cc770dev 1 48186 NULL
109020 +init_ipath_48187 init_ipath 1 48187 NULL
109021 +brcmf_sdio_chip_cm3_exitdl_48192 brcmf_sdio_chip_cm3_exitdl 4 48192 NULL
109022 +cfg80211_process_deauth_48200 cfg80211_process_deauth 3 48200 NULL
109023 +ext4_index_trans_blocks_48205 ext4_index_trans_blocks 0-2 48205 NULL
109024 +snd_seq_dump_var_event_48209 snd_seq_dump_var_event 0 48209 NULL
109025 +ll_direct_IO_26_48216 ll_direct_IO_26 4 48216 NULL
109026 +is_block_in_journal_48223 is_block_in_journal 3 48223 NULL
109027 +uv_blade_nr_possible_cpus_48226 uv_blade_nr_possible_cpus 0 48226 NULL
109028 +nilfs_readpages_48229 nilfs_readpages 4 48229 NULL
109029 +read_file_recv_48232 read_file_recv 3 48232 NULL
109030 +unaccount_shadowed_48233 unaccount_shadowed 2 48233 NULL nohasharray
109031 +blk_rq_pos_48233 blk_rq_pos 0 48233 &unaccount_shadowed_48233
109032 +nouveau_i2c_port_create__48240 nouveau_i2c_port_create_ 7 48240 NULL
109033 +nfsctl_transaction_read_48250 nfsctl_transaction_read 3 48250 NULL
109034 +batadv_socket_read_48257 batadv_socket_read 3 48257 NULL
109035 +cache_write_pipefs_48270 cache_write_pipefs 3 48270 NULL
109036 +trace_options_write_48275 trace_options_write 3 48275 NULL
109037 +send_set_info_48288 send_set_info 7 48288 NULL
109038 +lpfc_idiag_extacc_read_48301 lpfc_idiag_extacc_read 3 48301 NULL
109039 +timblogiw_read_48305 timblogiw_read 3 48305 NULL
109040 +hash_setkey_48310 hash_setkey 3 48310 NULL
109041 +audio_set_intf_req_48319 audio_set_intf_req 0 48319 NULL
109042 +kvm_mmu_pte_write_48340 kvm_mmu_pte_write 2 48340 NULL
109043 +__alloc_fd_48356 __alloc_fd 2 48356 NULL
109044 +skb_add_data_48363 skb_add_data 3 48363 NULL
109045 +tx_frag_init_called_read_48377 tx_frag_init_called_read 3 48377 NULL
109046 +lbs_debugfs_write_48413 lbs_debugfs_write 3 48413 NULL
109047 +uhid_event_from_user_48417 uhid_event_from_user 2 48417 NULL
109048 +div64_u64_rem_48418 div64_u64_rem 0-1-2 48418 NULL
109049 +snd_power_wait_48422 snd_power_wait 0 48422 NULL
109050 +pwr_tx_without_ps_read_48423 pwr_tx_without_ps_read 3 48423 NULL
109051 +hugepage_madvise_48435 hugepage_madvise 0 48435 NULL
109052 +print_filtered_48442 print_filtered 2-0 48442 NULL
109053 +tun_recvmsg_48463 tun_recvmsg 4 48463 NULL
109054 +compat_SyS_preadv64_48469 compat_SyS_preadv64 3 48469 NULL
109055 +ipath_format_hwerrors_48487 ipath_format_hwerrors 5 48487 NULL
109056 +init_section_page_cgroup_48489 init_section_page_cgroup 2 48489 NULL nohasharray
109057 +r8712_usbctrl_vendorreq_48489 r8712_usbctrl_vendorreq 6 48489 &init_section_page_cgroup_48489
109058 +ocfs2_refcount_cow_48495 ocfs2_refcount_cow 3 48495 NULL
109059 +send_control_msg_48498 send_control_msg 6 48498 NULL
109060 +mlx4_en_create_tx_ring_48501 mlx4_en_create_tx_ring 4 48501 NULL
109061 +count_masked_bytes_48507 count_masked_bytes 0-1 48507 NULL
109062 +diva_os_copy_to_user_48508 diva_os_copy_to_user 4 48508 NULL
109063 +brcmf_sdio_trap_info_48510 brcmf_sdio_trap_info 4 48510 NULL
109064 +phantom_get_free_48514 phantom_get_free 0 48514 NULL
109065 +drbd_bm_capacity_48530 drbd_bm_capacity 0 48530 NULL
109066 +ext3_splice_branch_48531 ext3_splice_branch 6 48531 NULL
109067 +raid10_size_48571 raid10_size 0-2-3 48571 NULL
109068 +llog_data_len_48607 llog_data_len 1 48607 NULL
109069 +ufs_dtogd_48616 ufs_dtogd 0-2 48616 NULL
109070 +do_ip_vs_set_ctl_48641 do_ip_vs_set_ctl 4 48641 NULL
109071 +ll_rw_extents_stats_pp_seq_write_48651 ll_rw_extents_stats_pp_seq_write 3 48651 NULL
109072 +mtd_read_48655 mtd_read 0 48655 NULL
109073 +aes_encrypt_packets_read_48666 aes_encrypt_packets_read 3 48666 NULL
109074 +ore_get_rw_state_48667 ore_get_rw_state 5-4 48667 NULL
109075 +sm501_create_subdev_48668 sm501_create_subdev 4-3 48668 NULL
109076 +ubi_eba_unmap_leb_48671 ubi_eba_unmap_leb 0 48671 NULL
109077 +hysdn_log_write_48694 hysdn_log_write 3 48694 NULL
109078 +altera_drscan_48698 altera_drscan 2 48698 NULL
109079 +kvm_set_irq_routing_48704 kvm_set_irq_routing 3 48704 NULL
109080 +recv_msg_48709 recv_msg 4 48709 NULL
109081 +lpfc_idiag_drbacc_write_48712 lpfc_idiag_drbacc_write 3 48712 NULL
109082 +SyS_lgetxattr_48719 SyS_lgetxattr 4 48719 NULL
109083 +ath6kl_usb_bmi_read_48745 ath6kl_usb_bmi_read 3 48745 NULL
109084 +ath6kl_regwrite_read_48747 ath6kl_regwrite_read 3 48747 NULL
109085 +event_buffer_read_48772 event_buffer_read 3 48772 NULL nohasharray
109086 +l2cap_segment_sdu_48772 l2cap_segment_sdu 4 48772 &event_buffer_read_48772
109087 +gfs2_direct_IO_48774 gfs2_direct_IO 4 48774 NULL
109088 +il3945_sta_dbgfs_stats_table_read_48802 il3945_sta_dbgfs_stats_table_read 3 48802 NULL
109089 +twa_change_queue_depth_48808 twa_change_queue_depth 2 48808 NULL
109090 +register_ftrace_profiler_48816 register_ftrace_profiler 0 48816 NULL
109091 +atomic_counters_read_48827 atomic_counters_read 3 48827 NULL
109092 +azx_get_position_48841 azx_get_position 0 48841 NULL
109093 +vc_do_resize_48842 vc_do_resize 4-3 48842 NULL
109094 +comedi_buf_write_alloc_48846 comedi_buf_write_alloc 0-2 48846 NULL
109095 +suspend_dtim_interval_write_48854 suspend_dtim_interval_write 3 48854 NULL
109096 +C_SYSC_pwritev64_48864 C_SYSC_pwritev64 3 48864 NULL nohasharray
109097 +viafb_dvp1_proc_write_48864 viafb_dvp1_proc_write 3 48864 &C_SYSC_pwritev64_48864
109098 +ide_port_alloc_devices_48866 ide_port_alloc_devices 2 48866 NULL
109099 +__ffs_ep0_read_events_48868 __ffs_ep0_read_events 3 48868 NULL
109100 +ext2_alloc_branch_48889 ext2_alloc_branch 4 48889 NULL
109101 +crypto_cipher_ctxsize_48890 crypto_cipher_ctxsize 0 48890 NULL
109102 +joydev_handle_JSIOCSAXMAP_48898 joydev_handle_JSIOCSAXMAP 3 48898 NULL
109103 +xdi_copy_to_user_48900 xdi_copy_to_user 4 48900 NULL
109104 +msg_hdr_sz_48908 msg_hdr_sz 0 48908 NULL
109105 +snd_pcm_update_hw_ptr_48925 snd_pcm_update_hw_ptr 0 48925 NULL
109106 +sep_crypto_dma_48937 sep_crypto_dma 0 48937 NULL
109107 +si5351_write_parameters_48940 si5351_write_parameters 2 48940 NULL
109108 +event_heart_beat_read_48961 event_heart_beat_read 3 48961 NULL
109109 +nand_ecc_test_run_48966 nand_ecc_test_run 1 48966 NULL
109110 +vmci_handle_arr_create_48971 vmci_handle_arr_create 1 48971 NULL
109111 +batadv_orig_hash_del_if_48972 batadv_orig_hash_del_if 2 48972 NULL
109112 +btrfs_delete_delayed_insertion_item_48981 btrfs_delete_delayed_insertion_item 0 48981 NULL
109113 +rds_rm_size_48996 rds_rm_size 0-2 48996 NULL
109114 +sel_write_enforce_48998 sel_write_enforce 3 48998 NULL
109115 +null_alloc_rs_49019 null_alloc_rs 2 49019 NULL
109116 +filemap_check_errors_49022 filemap_check_errors 0 49022 NULL
109117 +aic_inb_49023 aic_inb 0 49023 NULL
109118 +transient_status_49027 transient_status 4 49027 NULL
109119 +iwl_mvm_power_legacy_dbgfs_read_49038 iwl_mvm_power_legacy_dbgfs_read 4 49038 NULL nohasharray
109120 +ipath_reg_user_mr_49038 ipath_reg_user_mr 2-3 49038 &iwl_mvm_power_legacy_dbgfs_read_49038
109121 +aic7xxx_rem_scb_from_disc_list_49041 aic7xxx_rem_scb_from_disc_list 0 49041 NULL
109122 +setup_msi_irq_49052 setup_msi_irq 3-4 49052 NULL
109123 +ubi_read_49061 ubi_read 0 49061 NULL
109124 +scsi_register_49094 scsi_register 2 49094 NULL
109125 +paging64_walk_addr_nested_49100 paging64_walk_addr_nested 3 49100 NULL
109126 +compat_do_readv_writev_49102 compat_do_readv_writev 4 49102 NULL
109127 +xfrm_replay_state_esn_len_49119 xfrm_replay_state_esn_len 0 49119 NULL
109128 +ll_max_cached_mb_seq_write_49122 ll_max_cached_mb_seq_write 3 49122 NULL
109129 +qib_user_sdma_pin_pages_49136 qib_user_sdma_pin_pages 4-5-6 49136 NULL nohasharray
109130 +pt_read_49136 pt_read 3 49136 &qib_user_sdma_pin_pages_49136
109131 +read_file_49137 read_file 4 49137 NULL
109132 +ipwireless_tty_received_49154 ipwireless_tty_received 3 49154 NULL
109133 +f2fs_acl_count_49155 f2fs_acl_count 0-1 49155 NULL
109134 +ipw_queue_tx_init_49161 ipw_queue_tx_init 3 49161 NULL
109135 +ext4_free_clusters_after_init_49174 ext4_free_clusters_after_init 2 49174 NULL
109136 +__jfs_setxattr_49175 __jfs_setxattr 5 49175 NULL
109137 +ath6kl_bgscan_int_write_49178 ath6kl_bgscan_int_write 3 49178 NULL
109138 +dvb_dvr_ioctl_49182 dvb_dvr_ioctl 2 49182 NULL
109139 +print_queue_49191 print_queue 0 49191 NULL
109140 +root_nfs_cat_49192 root_nfs_cat 3 49192 NULL
109141 +iwl_dbgfs_ucode_general_stats_read_49199 iwl_dbgfs_ucode_general_stats_read 3 49199 NULL
109142 +il4965_rs_sta_dbgfs_stats_table_read_49206 il4965_rs_sta_dbgfs_stats_table_read 3 49206 NULL
109143 +do_jffs2_getxattr_49210 do_jffs2_getxattr 0 49210 NULL
109144 +resp_write_same_49217 resp_write_same 2 49217 NULL
109145 +nouveau_therm_create__49228 nouveau_therm_create_ 4 49228 NULL
109146 +hugetlb_cgroup_read_49259 hugetlb_cgroup_read 5 49259 NULL
109147 +ieee80211_if_read_rssi_threshold_49260 ieee80211_if_read_rssi_threshold 3 49260 NULL
109148 +isku_sysfs_read_keys_media_49268 isku_sysfs_read_keys_media 6 49268 NULL
109149 +ptlrpc_check_set_49277 ptlrpc_check_set 0 49277 NULL
109150 +rx_filter_beacon_filter_read_49279 rx_filter_beacon_filter_read 3 49279 NULL
109151 +__ext4_ext_dirty_49284 __ext4_ext_dirty 0 49284 NULL
109152 +ext4_xattr_ibody_list_49287 ext4_xattr_ibody_list 3-0 49287 NULL
109153 +viafb_dfph_proc_write_49288 viafb_dfph_proc_write 3 49288 NULL
109154 +uio_read_49300 uio_read 3 49300 NULL
109155 +ocfs2_resmap_find_free_bits_49301 ocfs2_resmap_find_free_bits 3 49301 NULL
109156 +isku_sysfs_read_keys_macro_49312 isku_sysfs_read_keys_macro 6 49312 NULL
109157 +update_ref_for_cow_49313 update_ref_for_cow 0 49313 NULL
109158 +SYSC_mincore_49319 SYSC_mincore 2-1 49319 NULL
109159 +fwtty_port_handler_49327 fwtty_port_handler 9 49327 NULL
109160 +srpt_alloc_ioctx_ring_49330 srpt_alloc_ioctx_ring 2-4-3 49330 NULL
109161 +__intel_map_single_49338 __intel_map_single 3-2 49338 NULL
109162 +intel_ring_invalidate_all_caches_49346 intel_ring_invalidate_all_caches 0 49346 NULL
109163 +joydev_ioctl_common_49359 joydev_ioctl_common 2 49359 NULL
109164 +ocfs2_remove_btree_range_49370 ocfs2_remove_btree_range 4-3-5 49370 NULL
109165 +iscsi_alloc_session_49390 iscsi_alloc_session 3 49390 NULL
109166 +ext4_ext_index_trans_blocks_49396 ext4_ext_index_trans_blocks 0 49396 NULL
109167 +rx_streaming_always_read_49401 rx_streaming_always_read 3 49401 NULL
109168 +tnode_alloc_49407 tnode_alloc 1 49407 NULL
109169 +samples_to_bytes_49426 samples_to_bytes 0-2 49426 NULL
109170 +ubi_add_to_av_49432 ubi_add_to_av 0 49432 NULL nohasharray
109171 +md_domain_init_49432 md_domain_init 2 49432 &ubi_add_to_av_49432
109172 +compat_do_msg_fill_49440 compat_do_msg_fill 3 49440 NULL
109173 +get_lru_size_49441 get_lru_size 0 49441 NULL
109174 +i915_gem_object_set_to_gtt_domain_49450 i915_gem_object_set_to_gtt_domain 0 49450 NULL
109175 +ocfs2_merge_rec_left_49455 ocfs2_merge_rec_left 0 49455 NULL
109176 +__hfsplus_getxattr_49460 __hfsplus_getxattr 0 49460 NULL
109177 +agp_3_5_isochronous_node_enable_49465 agp_3_5_isochronous_node_enable 3 49465 NULL
109178 +xfs_iformat_local_49472 xfs_iformat_local 4 49472 NULL
109179 +savu_sysfs_read_49473 savu_sysfs_read 6 49473 NULL
109180 +isr_decrypt_done_read_49490 isr_decrypt_done_read 3 49490 NULL
109181 +SyS_listxattr_49519 SyS_listxattr 3 49519 NULL
109182 +emulator_write_phys_49520 emulator_write_phys 2-4 49520 NULL
109183 +btrfs_add_free_space_49524 btrfs_add_free_space 0 49524 NULL
109184 +smk_write_access_49561 smk_write_access 3 49561 NULL
109185 +ntfs_malloc_nofs_49572 ntfs_malloc_nofs 1 49572 NULL
109186 +alloc_chunk_49575 alloc_chunk 1 49575 NULL
109187 +sctp_setsockopt_default_send_param_49578 sctp_setsockopt_default_send_param 3 49578 NULL
109188 +readfifo_49583 readfifo 1 49583 NULL
109189 +tap_write_49595 tap_write 3 49595 NULL
109190 +create_task_io_context_49601 create_task_io_context 3 49601 NULL
109191 +isr_wakeups_read_49607 isr_wakeups_read 3 49607 NULL
109192 +btrfs_mksubvol_49616 btrfs_mksubvol 3 49616 NULL
109193 +heap_init_49617 heap_init 2 49617 NULL
109194 +smk_write_doi_49621 smk_write_doi 3 49621 NULL
109195 +port_fops_read_49626 port_fops_read 3 49626 NULL
109196 +btrfsic_cmp_log_and_dev_bytenr_49628 btrfsic_cmp_log_and_dev_bytenr 2 49628 NULL
109197 +__swab32p_49657 __swab32p 0 49657 NULL
109198 +ubi_wl_flush_49682 ubi_wl_flush 0 49682 NULL
109199 +aa_simple_write_to_buffer_49683 aa_simple_write_to_buffer 3-4 49683 NULL
109200 +SyS_pwritev_49688 SyS_pwritev 3 49688 NULL
109201 +__setup_irq_49696 __setup_irq 0 49696 NULL
109202 +cx2341x_ctrl_new_menu_49700 cx2341x_ctrl_new_menu 3 49700 NULL
109203 +do_splice_to_49714 do_splice_to 4 49714 NULL
109204 +write_pool_49718 write_pool 3 49718 NULL
109205 +dm_thin_insert_block_49720 dm_thin_insert_block 2-3 49720 NULL
109206 +kvm_mmu_notifier_invalidate_page_49723 kvm_mmu_notifier_invalidate_page 3 49723 NULL
109207 +sep_create_dcb_dmatables_context_kernel_49728 sep_create_dcb_dmatables_context_kernel 6 49728 NULL
109208 +zd_usb_iowrite16v_49744 zd_usb_iowrite16v 3 49744 NULL
109209 +btrfs_chunk_num_stripes_49751 btrfs_chunk_num_stripes 0 49751 NULL
109210 +fuse_wr_pages_49753 fuse_wr_pages 0-1-2 49753 NULL
109211 +key_conf_keylen_read_49758 key_conf_keylen_read 3 49758 NULL
109212 +fuse_conn_waiting_read_49762 fuse_conn_waiting_read 3 49762 NULL
109213 +w83977af_fir_interrupt_49775 w83977af_fir_interrupt 0 49775 NULL
109214 +ceph_osdc_readpages_49789 ceph_osdc_readpages 0 49789 NULL
109215 +nfs4_acl_new_49806 nfs4_acl_new 1 49806 NULL
109216 +ntfs_copy_from_user_iovec_49829 ntfs_copy_from_user_iovec 3-6-0 49829 NULL
109217 +add_uuid_49831 add_uuid 4 49831 NULL
109218 +ath6kl_fwlog_block_read_49836 ath6kl_fwlog_block_read 3 49836 NULL
109219 +iraw_loop_49842 iraw_loop 0-1 49842 NULL
109220 +twl4030_write_49846 twl4030_write 2 49846 NULL
109221 +scsi_dispatch_cmd_entry_49848 scsi_dispatch_cmd_entry 3 49848 NULL
109222 +timeradd_entry_49850 timeradd_entry 3 49850 NULL
109223 +ubifs_destroy_tnc_subtree_49853 ubifs_destroy_tnc_subtree 0 49853 NULL
109224 +btrfs_subvolume_reserve_metadata_49859 btrfs_subvolume_reserve_metadata 3 49859 NULL
109225 +fiemap_count_to_size_49869 fiemap_count_to_size 0-1 49869 NULL
109226 +sctp_setsockopt_bindx_49870 sctp_setsockopt_bindx 3 49870 NULL
109227 +snd_mask_eq_49889 snd_mask_eq 0 49889 NULL
109228 +ceph_get_caps_49890 ceph_get_caps 0 49890 NULL
109229 +osc_brw_49896 osc_brw 4 49896 NULL
109230 +__cow_file_range_49901 __cow_file_range 5 49901 NULL
109231 +__copy_from_user_inatomic_nocache_49921 __copy_from_user_inatomic_nocache 3 49921 NULL
109232 +config_ep_by_speed_49939 config_ep_by_speed 0 49939 NULL
109233 +batadv_tt_realloc_packet_buff_49960 batadv_tt_realloc_packet_buff 4 49960 NULL
109234 +ieee80211_if_fmt_dtim_count_49987 ieee80211_if_fmt_dtim_count 3 49987 NULL
109235 +drm_buffer_copy_from_user_49990 drm_buffer_copy_from_user 3 49990 NULL
109236 +sta2x11_swiotlb_alloc_coherent_49994 sta2x11_swiotlb_alloc_coherent 2 49994 NULL
109237 +l2cap_chan_send_49995 l2cap_chan_send 3 49995 NULL
109238 +__module_alloc_50004 __module_alloc 1 50004 NULL
109239 +dn_mss_from_pmtu_50011 dn_mss_from_pmtu 0-2 50011 NULL
109240 +ptrace_readdata_50020 ptrace_readdata 2-4 50020 NULL
109241 +isdn_read_50021 isdn_read 3 50021 NULL
109242 +mdc_rename_pack_50023 mdc_rename_pack 4-6 50023 NULL
109243 +qp_alloc_queue_50028 qp_alloc_queue 1 50028 NULL
109244 +ioread8_50049 ioread8 0 50049 NULL
109245 +fuse_conn_max_background_write_50061 fuse_conn_max_background_write 3 50061 NULL
109246 +vmw_surface_destroy_size_50072 vmw_surface_destroy_size 0 50072 NULL
109247 +arch_setup_ht_irq_50073 arch_setup_ht_irq 1 50073 NULL
109248 +__kfifo_dma_in_prepare_50081 __kfifo_dma_in_prepare 4 50081 NULL
109249 +dev_set_alias_50084 dev_set_alias 3 50084 NULL
109250 +libcfs_ioctl_popdata_50087 libcfs_ioctl_popdata 3 50087 NULL
109251 +sock_setsockopt_50088 sock_setsockopt 5 50088 NULL
109252 +altera_swap_dr_50090 altera_swap_dr 2 50090 NULL
109253 +android_set_cntry_50100 android_set_cntry 0 50100 NULL
109254 +process_recorded_refs_50110 process_recorded_refs 0 50110 NULL
109255 +read_file_slot_50111 read_file_slot 3 50111 NULL
109256 +xfs_dir3_sf_entsize_50112 xfs_dir3_sf_entsize 0-3 50112 NULL
109257 +rx_streaming_interval_write_50120 rx_streaming_interval_write 3 50120 NULL
109258 +ocfs2_search_one_group_50125 ocfs2_search_one_group 0 50125 NULL nohasharray
109259 +jfs_direct_IO_50125 jfs_direct_IO 4 50125 &ocfs2_search_one_group_50125
109260 +SYSC_preadv_50134 SYSC_preadv 3 50134 NULL
109261 +copy_items_50140 copy_items 6 50140 NULL
109262 +tx_frag_need_fragmentation_read_50153 tx_frag_need_fragmentation_read 3 50153 NULL
109263 +reiserfs_bmap_count_50160 reiserfs_bmap_count 0 50160 NULL
109264 +kmalloc_node_50163 kmalloc_node 1-3 50163 NULL
109265 +rx_filter_ibss_filter_read_50167 rx_filter_ibss_filter_read 3 50167 NULL
109266 +ahd_probe_stack_size_50168 ahd_probe_stack_size 0 50168 NULL
109267 +odev_update_50169 odev_update 2 50169 NULL
109268 +ieee80211_if_fmt_dot11MeshHWMPRannInterval_50172 ieee80211_if_fmt_dot11MeshHWMPRannInterval 3 50172 NULL nohasharray
109269 +xfs_get_blocks_direct_50172 xfs_get_blocks_direct 2 50172 &ieee80211_if_fmt_dot11MeshHWMPRannInterval_50172 nohasharray
109270 +ubi_resize_volume_50172 ubi_resize_volume 2 50172 &xfs_get_blocks_direct_50172
109271 +ext3_do_update_inode_50178 ext3_do_update_inode 0 50178 NULL
109272 +cfg80211_roamed_bss_50198 cfg80211_roamed_bss 6-4 50198 NULL
109273 +cyttsp4_probe_50201 cyttsp4_probe 4 50201 NULL
109274 +rx_rx_timeout_wa_read_50204 rx_rx_timeout_wa_read 3 50204 NULL
109275 +compat_SyS_sendfile_50206 compat_SyS_sendfile 4 50206 NULL nohasharray
109276 +mthca_buddy_init_50206 mthca_buddy_init 2 50206 &compat_SyS_sendfile_50206
109277 +l2cap_sock_setsockopt_50207 l2cap_sock_setsockopt 5 50207 NULL
109278 +mon_bin_compat_ioctl_50234 mon_bin_compat_ioctl 3 50234 NULL
109279 +sg_kmalloc_50240 sg_kmalloc 1 50240 NULL
109280 +vma_dup_policy_50269 vma_dup_policy 0 50269 NULL
109281 +rxrpc_setsockopt_50286 rxrpc_setsockopt 5 50286 NULL
109282 +soc_codec_reg_show_50302 soc_codec_reg_show 0-3 50302 NULL
109283 +SYSC_flistxattr_50307 SYSC_flistxattr 3 50307 NULL
109284 +SYSC_sched_setaffinity_50310 SYSC_sched_setaffinity 2 50310 NULL
109285 +iterate_irefs_50313 iterate_irefs 0 50313 NULL
109286 +soc_camera_read_50319 soc_camera_read 3 50319 NULL
109287 +do_launder_page_50329 do_launder_page 0 50329 NULL
109288 +nouveau_engine_create__50331 nouveau_engine_create_ 7 50331 NULL
109289 +lpfc_idiag_pcicfg_read_50334 lpfc_idiag_pcicfg_read 3 50334 NULL
109290 +ocfs2_block_to_cluster_group_50337 ocfs2_block_to_cluster_group 2 50337 NULL nohasharray
109291 +snd_pcm_lib_writev_50337 snd_pcm_lib_writev 0-3 50337 &ocfs2_block_to_cluster_group_50337
109292 +tpm_read_50344 tpm_read 3 50344 NULL
109293 +sched_clock_remote_50347 sched_clock_remote 0 50347 NULL
109294 +efx_nic_update_stats_50352 efx_nic_update_stats 2 50352 NULL
109295 +kvm_arch_create_memslot_50354 kvm_arch_create_memslot 2 50354 NULL
109296 +isdn_ppp_read_50356 isdn_ppp_read 4 50356 NULL
109297 +ocfs2_figure_insert_type_50362 ocfs2_figure_insert_type 0 50362 NULL nohasharray
109298 +iwl_dbgfs_echo_test_write_50362 iwl_dbgfs_echo_test_write 3 50362 &ocfs2_figure_insert_type_50362
109299 +xfrm_send_migrate_50365 xfrm_send_migrate 5 50365 NULL
109300 +roccat_common2_receive_50369 roccat_common2_receive 4 50369 NULL
109301 +sl_alloc_bufs_50380 sl_alloc_bufs 2 50380 NULL
109302 +hash_ip6_expire_50390 hash_ip6_expire 3 50390 NULL
109303 +snd_mask_refine_last_50406 snd_mask_refine_last 0 50406 NULL
109304 +l2tp_ip_sendmsg_50411 l2tp_ip_sendmsg 4 50411 NULL
109305 +iscsi_create_conn_50425 iscsi_create_conn 2 50425 NULL
109306 +validate_acl_mac_addrs_50429 validate_acl_mac_addrs 0 50429 NULL
109307 +btrfs_error_discard_extent_50444 btrfs_error_discard_extent 2 50444 NULL nohasharray
109308 +ecryptfs_write_lower_page_segment_50444 ecryptfs_write_lower_page_segment 4 50444 &btrfs_error_discard_extent_50444
109309 +calc_csum_metadata_size_50448 calc_csum_metadata_size 0 50448 NULL
109310 +pgctrl_write_50453 pgctrl_write 3 50453 NULL
109311 +device_create_sys_dev_entry_50458 device_create_sys_dev_entry 0 50458 NULL
109312 +force_mapping_50464 force_mapping 2 50464 NULL
109313 +cfs_size_round_50472 cfs_size_round 0-1 50472 NULL
109314 +cdrom_read_cdda_50478 cdrom_read_cdda 4 50478 NULL
109315 +mei_io_cb_alloc_req_buf_50493 mei_io_cb_alloc_req_buf 2 50493 NULL
109316 +pwr_rcvd_awake_beacons_read_50505 pwr_rcvd_awake_beacons_read 3 50505 NULL
109317 +__ctzdi2_50536 __ctzdi2 1 50536 NULL
109318 +ath6kl_set_ap_probe_resp_ies_50539 ath6kl_set_ap_probe_resp_ies 3 50539 NULL
109319 +usbat_flash_write_data_50553 usbat_flash_write_data 4 50553 NULL
109320 +self_check_peb_vid_hdr_50563 self_check_peb_vid_hdr 0 50563 NULL nohasharray
109321 +ttm_agp_tt_create_50563 ttm_agp_tt_create 3 50563 &self_check_peb_vid_hdr_50563
109322 +fat_readpages_50582 fat_readpages 4 50582 NULL
109323 +iwl_dbgfs_missed_beacon_read_50584 iwl_dbgfs_missed_beacon_read 3 50584 NULL
109324 +build_inv_iommu_pages_50589 build_inv_iommu_pages 2-3 50589 NULL
109325 +xillybus_write_50605 xillybus_write 3 50605 NULL
109326 +rx_rx_checksum_result_read_50617 rx_rx_checksum_result_read 3 50617 NULL
109327 +ocfs2_split_extent_50618 ocfs2_split_extent 0 50618 NULL
109328 +sparse_early_usemaps_alloc_node_50623 sparse_early_usemaps_alloc_node 4 50623 NULL
109329 +__ffs_50625 __ffs 0-1 50625 NULL
109330 +regcache_rbtree_write_50629 regcache_rbtree_write 2 50629 NULL
109331 +simple_transaction_get_50633 simple_transaction_get 3 50633 NULL
109332 +ocfs2_do_insert_extent_50658 ocfs2_do_insert_extent 0 50658 NULL
109333 +ath6kl_tm_rx_event_50664 ath6kl_tm_rx_event 3 50664 NULL
109334 +bnad_debugfs_read_50665 bnad_debugfs_read 3 50665 NULL
109335 +ext2_try_to_allocate_with_rsv_50669 ext2_try_to_allocate_with_rsv 4-2-0 50669 NULL
109336 +prism2_read_fid_reg_50689 prism2_read_fid_reg 0 50689 NULL nohasharray
109337 +i2c_smbus_read_byte_data_50689 i2c_smbus_read_byte_data 0 50689 &prism2_read_fid_reg_50689
109338 +paging32_gva_to_gpa_50696 paging32_gva_to_gpa 2 50696 NULL
109339 +xfs_growfs_get_hdr_buf_50697 xfs_growfs_get_hdr_buf 3 50697 NULL
109340 +dev_mem_read_50706 dev_mem_read 3 50706 NULL
109341 +blk_check_plugged_50736 blk_check_plugged 3 50736 NULL
109342 +__ext3_get_inode_loc_50744 __ext3_get_inode_loc 0 50744 NULL
109343 +btrfs_truncate_free_space_cache_50769 btrfs_truncate_free_space_cache 0 50769 NULL
109344 +ocfs2_xattr_block_get_50773 ocfs2_xattr_block_get 0 50773 NULL
109345 +tm6000_read_write_usb_50774 tm6000_read_write_usb 7 50774 NULL
109346 +bio_alloc_map_data_50782 bio_alloc_map_data 2-1 50782 NULL
109347 +tpm_write_50798 tpm_write 3 50798 NULL
109348 +write_flush_50803 write_flush 3 50803 NULL
109349 +dvb_play_50814 dvb_play 3 50814 NULL
109350 +dpcm_show_state_50827 dpcm_show_state 0 50827 NULL
109351 +acpi_ev_install_gpe_block_50829 acpi_ev_install_gpe_block 2 50829 NULL
109352 +SetArea_50835 SetArea 4 50835 NULL nohasharray
109353 +create_mem_extents_50835 create_mem_extents 0 50835 &SetArea_50835 nohasharray
109354 +mask_from_50835 mask_from 0-1-2 50835 &create_mem_extents_50835
109355 +videobuf_dma_init_user_50839 videobuf_dma_init_user 3-4 50839 NULL
109356 +btrfs_search_slot_for_read_50843 btrfs_search_slot_for_read 0 50843 NULL
109357 +self_check_write_50856 self_check_write 0-5 50856 NULL
109358 +carl9170_debugfs_write_50857 carl9170_debugfs_write 3 50857 NULL
109359 +alloc_masks_50861 alloc_masks 3 50861 NULL
109360 +__percpu_counter_init_50878 __percpu_counter_init 0 50878 NULL
109361 +btrfs_insert_inode_ref_50884 btrfs_insert_inode_ref 0 50884 NULL
109362 +SyS_lgetxattr_50889 SyS_lgetxattr 4 50889 NULL
109363 +netlbl_secattr_catmap_walk_rng_50894 netlbl_secattr_catmap_walk_rng 0-2 50894 NULL
109364 +__bdev_writeseg_50903 __bdev_writeseg 4 50903 NULL
109365 +xfs_iext_remove_50909 xfs_iext_remove 3 50909 NULL
109366 +blk_rq_cur_sectors_50910 blk_rq_cur_sectors 0 50910 NULL
109367 +hash_recvmsg_50924 hash_recvmsg 4 50924 NULL
109368 +chd_dec_fetch_cdata_50926 chd_dec_fetch_cdata 3 50926 NULL
109369 +show_device_status_50947 show_device_status 0 50947 NULL
109370 +irq_timeout_write_50950 irq_timeout_write 3 50950 NULL
109371 +ocfs2_add_refcount_flag_50952 ocfs2_add_refcount_flag 6 50952 NULL
109372 +sdio_uart_write_50954 sdio_uart_write 3 50954 NULL
109373 +SyS_setxattr_50957 SyS_setxattr 4 50957 NULL
109374 +btrfs_del_csums_50969 btrfs_del_csums 0 50969 NULL
109375 +__btrfs_mod_ref_50973 __btrfs_mod_ref 0 50973 NULL
109376 +iwl_statistics_flag_50981 iwl_statistics_flag 0-3 50981 NULL
109377 +timeout_write_50991 timeout_write 3 50991 NULL
109378 +wm831x_irq_map_50995 wm831x_irq_map 2 50995 NULL
109379 +proc_write_51003 proc_write 3 51003 NULL
109380 +jbd2_journal_extend_51012 jbd2_journal_extend 2-0 51012 NULL
109381 +lbs_dev_info_51023 lbs_dev_info 3 51023 NULL
109382 +ntfs_attr_find_51028 ntfs_attr_find 0 51028 NULL nohasharray
109383 +fuse_conn_congestion_threshold_read_51028 fuse_conn_congestion_threshold_read 3 51028 &ntfs_attr_find_51028
109384 +BcmGetSectionValEndOffset_51039 BcmGetSectionValEndOffset 0 51039 NULL
109385 +dump_midi_51040 dump_midi 3 51040 NULL
109386 +usb_get_descriptor_51041 usb_get_descriptor 0 51041 NULL
109387 +srpt_alloc_ioctx_51042 srpt_alloc_ioctx 2-3 51042 NULL
109388 +do_arpt_set_ctl_51053 do_arpt_set_ctl 4 51053 NULL
109389 +wusb_prf_64_51065 wusb_prf_64 7 51065 NULL
109390 +jbd2_journal_init_revoke_51088 jbd2_journal_init_revoke 2 51088 NULL
109391 +solo_enc_v4l2_init_51094 solo_enc_v4l2_init 2 51094 NULL
109392 +__ocfs2_find_path_51096 __ocfs2_find_path 0 51096 NULL
109393 +btrfs_del_item_51110 btrfs_del_item 0 51110 NULL nohasharray
109394 +ti_recv_51110 ti_recv 3 51110 &btrfs_del_item_51110
109395 +uasp_prepare_r_request_51124 uasp_prepare_r_request 0 51124 NULL
109396 +nfs_map_name_to_uid_51132 nfs_map_name_to_uid 3 51132 NULL
109397 +alloc_rtllib_51136 alloc_rtllib 1 51136 NULL
109398 +simple_xattr_set_51140 simple_xattr_set 4 51140 NULL
109399 +set_dirty_51144 set_dirty 3 51144 NULL
109400 +xfs_trans_get_efd_51148 xfs_trans_get_efd 3 51148 NULL
109401 +walk_page_buffers_51170 walk_page_buffers 0 51170 NULL
109402 +snd_pcm_unlink_51210 snd_pcm_unlink 0 51210 NULL
109403 +uhci_frame_skel_link_51213 uhci_frame_skel_link 2 51213 NULL
109404 +nf_ct_ext_create_51232 nf_ct_ext_create 3 51232 NULL
109405 +snd_pcm_write_51235 snd_pcm_write 3 51235 NULL
109406 +tipc_send_51238 tipc_send 4 51238 NULL
109407 +drm_property_create_51239 drm_property_create 4 51239 NULL
109408 +__mxt_read_reg_51249 __mxt_read_reg 0 51249 NULL
109409 +st_read_51251 st_read 3 51251 NULL
109410 +compat_dccp_setsockopt_51263 compat_dccp_setsockopt 5 51263 NULL
109411 +target_alloc_sgl_51264 target_alloc_sgl 3 51264 NULL
109412 +dvb_audio_write_51275 dvb_audio_write 3 51275 NULL
109413 +ipwireless_network_packet_received_51277 ipwireless_network_packet_received 4 51277 NULL
109414 +zone_reclaimable_pages_51283 zone_reclaimable_pages 0 51283 NULL nohasharray
109415 +__get_cur_name_and_parent_51283 __get_cur_name_and_parent 0 51283 &zone_reclaimable_pages_51283
109416 +pvr2_std_id_to_str_51288 pvr2_std_id_to_str 2 51288 NULL
109417 +bnad_debugfs_read_regrd_51308 bnad_debugfs_read_regrd 3 51308 NULL
109418 +get_cell_51316 get_cell 2 51316 NULL
109419 +init_map_ipmac_51317 init_map_ipmac 4-3-5 51317 NULL
109420 +ocfs2_read_inode_block_51319 ocfs2_read_inode_block 0 51319 NULL
109421 +alloc_hippi_dev_51320 alloc_hippi_dev 1 51320 NULL
109422 +ext2_xattr_get_51327 ext2_xattr_get 0 51327 NULL
109423 +alloc_smp_req_51337 alloc_smp_req 1 51337 NULL nohasharray
109424 +compat_arch_ptrace_51337 compat_arch_ptrace 3 51337 &alloc_smp_req_51337
109425 +ipw_get_event_log_len_51341 ipw_get_event_log_len 0 51341 NULL
109426 +ieee80211_if_fmt_estab_plinks_51370 ieee80211_if_fmt_estab_plinks 3 51370 NULL
109427 +radeon_kms_compat_ioctl_51371 radeon_kms_compat_ioctl 2 51371 NULL
109428 +pci_sriov_resource_alignment_51406 pci_sriov_resource_alignment 0 51406 NULL
109429 +ceph_sync_read_51410 ceph_sync_read 3-0 51410 NULL
109430 +get_first_ref_51413 get_first_ref 0 51413 NULL
109431 +__btrfs_end_transaction_51420 __btrfs_end_transaction 0 51420 NULL
109432 +blk_register_region_51424 blk_register_region 1-2 51424 NULL
109433 +mwifiex_rdeeprom_read_51429 mwifiex_rdeeprom_read 3 51429 NULL
109434 +hfsplus_brec_read_51436 hfsplus_brec_read 0 51436 NULL
109435 +ieee80211_if_read_dot11MeshHWMPRootMode_51441 ieee80211_if_read_dot11MeshHWMPRootMode 3 51441 NULL
109436 +print_devstats_dot11ACKFailureCount_51443 print_devstats_dot11ACKFailureCount 3 51443 NULL
109437 +____alloc_ei_netdev_51475 ____alloc_ei_netdev 1 51475 NULL
109438 +xfs_buf_get_uncached_51477 xfs_buf_get_uncached 2 51477 NULL
109439 +btrfs_find_space_cluster_51482 btrfs_find_space_cluster 5 51482 NULL
109440 +kvm_fetch_guest_virt_51493 kvm_fetch_guest_virt 4-2 51493 NULL
109441 +ieee80211_if_write_uapsd_queues_51526 ieee80211_if_write_uapsd_queues 3 51526 NULL
109442 +key_search_51533 key_search 0 51533 NULL
109443 +load_pdptrs_51541 load_pdptrs 3 51541 NULL
109444 +__alloc_eip_netdev_51549 __alloc_eip_netdev 1 51549 NULL
109445 +ixgb_get_eeprom_len_51586 ixgb_get_eeprom_len 0 51586 NULL
109446 +get_cur_path_51589 get_cur_path 0 51589 NULL nohasharray
109447 +snd_interval_refine_first_51589 snd_interval_refine_first 0 51589 &get_cur_path_51589
109448 +aac_convert_sgraw2_51598 aac_convert_sgraw2 4 51598 NULL
109449 +table_size_to_number_of_entries_51613 table_size_to_number_of_entries 0-1 51613 NULL
109450 +extent_fiemap_51621 extent_fiemap 3 51621 NULL
109451 +sctp_auth_create_key_51641 sctp_auth_create_key 1 51641 NULL
109452 +iscsi_create_session_51647 iscsi_create_session 3 51647 NULL
109453 +get_new_cssid_51665 get_new_cssid 2 51665 NULL
109454 +ps_upsd_utilization_read_51669 ps_upsd_utilization_read 3 51669 NULL
109455 +sctp_setsockopt_associnfo_51684 sctp_setsockopt_associnfo 3 51684 NULL
109456 +host_mapping_level_51696 host_mapping_level 2-0 51696 NULL
109457 +sel_write_access_51704 sel_write_access 3 51704 NULL
109458 +tty_cdev_add_51714 tty_cdev_add 2-4 51714 NULL
109459 +v9fs_alloc_rdir_buf_51716 v9fs_alloc_rdir_buf 2 51716 NULL
109460 +drm_compat_ioctl_51717 drm_compat_ioctl 2 51717 NULL
109461 +sg_read_oxfer_51724 sg_read_oxfer 3 51724 NULL
109462 +dbg_check_lpt_nodes_51727 dbg_check_lpt_nodes 0 51727 NULL
109463 +cm4040_read_51732 cm4040_read 3 51732 NULL
109464 +get_user_pages_fast_51751 get_user_pages_fast 1-2-0 51751 NULL
109465 +ifx_spi_insert_flip_string_51752 ifx_spi_insert_flip_string 3 51752 NULL
109466 +if_write_51756 if_write 3 51756 NULL
109467 +ext4_ext_create_new_leaf_51763 ext4_ext_create_new_leaf 0 51763 NULL
109468 +__fswab32_51781 __fswab32 0 51781 NULL
109469 +to_ratio_51809 to_ratio 1-2 51809 NULL
109470 +qib_alloc_devdata_51819 qib_alloc_devdata 2 51819 NULL
109471 +buffer_from_user_51826 buffer_from_user 3 51826 NULL
109472 +max732x_irq_pending_51840 max732x_irq_pending 0 51840 NULL
109473 +ioread32_51847 ioread32 0 51847 NULL nohasharray
109474 +read_file_tgt_tx_stats_51847 read_file_tgt_tx_stats 3 51847 &ioread32_51847
109475 +do_readv_writev_51849 do_readv_writev 4 51849 NULL
109476 +SYSC_sendto_51852 SYSC_sendto 6 51852 NULL
109477 +bm_page_io_async_51858 bm_page_io_async 2 51858 NULL
109478 +pointer_size_read_51863 pointer_size_read 3 51863 NULL
109479 +mlx4_alloc_db_from_pgdir_51865 mlx4_alloc_db_from_pgdir 3 51865 NULL
109480 +get_indirect_ea_51869 get_indirect_ea 4 51869 NULL
109481 +user_read_51881 user_read 3 51881 NULL
109482 +dbAdjCtl_51888 dbAdjCtl 0 51888 NULL
109483 +SyS_mq_timedsend_51896 SyS_mq_timedsend 3 51896 NULL nohasharray
109484 +virt_to_phys_51896 virt_to_phys 0 51896 &SyS_mq_timedsend_51896
109485 +commit_fs_roots_51898 commit_fs_roots 0 51898 NULL
109486 +uvhub_to_first_node_51916 uvhub_to_first_node 0 51916 NULL
109487 +wmi_set_ie_51919 wmi_set_ie 3 51919 NULL
109488 +dbg_status_buf_51930 dbg_status_buf 2 51930 NULL
109489 +__tcp_mtu_to_mss_51938 __tcp_mtu_to_mss 0-2 51938 NULL
109490 +xfrm_alg_len_51940 xfrm_alg_len 0 51940 NULL
109491 +irq_dispose_mapping_51941 irq_dispose_mapping 1 51941 NULL
109492 +scsi_get_vpd_page_51951 scsi_get_vpd_page 4 51951 NULL
109493 +arizona_free_irq_51969 arizona_free_irq 2 51969 NULL nohasharray
109494 +snd_mask_min_51969 snd_mask_min 0 51969 &arizona_free_irq_51969
109495 +__blkdev_get_51972 __blkdev_get 0 51972 NULL nohasharray
109496 +read_page_51972 read_page 2 51972 &__blkdev_get_51972
109497 +get_zone_51981 get_zone 0-1 51981 NULL
109498 +ath6kl_sdio_alloc_prep_scat_req_51986 ath6kl_sdio_alloc_prep_scat_req 2 51986 NULL
109499 +_c4iw_write_mem_dma_51991 _c4iw_write_mem_dma 3 51991 NULL
109500 +ntfs_attr_size_51994 ntfs_attr_size 0 51994 NULL
109501 +dwc3_mode_write_51997 dwc3_mode_write 3 51997 NULL
109502 +skb_copy_datagram_from_iovec_52014 skb_copy_datagram_from_iovec 4-2-5 52014 NULL
109503 +rdmalt_52022 rdmalt 0 52022 NULL
109504 +override_release_52032 override_release 2 52032 NULL
109505 +end_port_52042 end_port 0 52042 NULL
109506 +dma_rx_errors_read_52045 dma_rx_errors_read 3 52045 NULL
109507 +msnd_fifo_write_52052 msnd_fifo_write 0-3 52052 NULL
109508 +dvb_ringbuffer_avail_52057 dvb_ringbuffer_avail 0 52057 NULL
109509 +__fuse_request_alloc_52060 __fuse_request_alloc 1 52060 NULL
109510 +isofs_readpages_52067 isofs_readpages 4 52067 NULL
109511 +nsm_get_handle_52089 nsm_get_handle 4 52089 NULL
109512 +ulist_add_merge_52096 ulist_add_merge 0 52096 NULL
109513 +o2net_debug_read_52105 o2net_debug_read 3 52105 NULL
109514 +split_scan_timeout_write_52128 split_scan_timeout_write 3 52128 NULL
109515 +retry_count_read_52129 retry_count_read 3 52129 NULL
109516 +snd_pcm_channel_info_user_52135 snd_pcm_channel_info_user 0 52135 NULL
109517 +gdm_usb_hci_send_52138 gdm_usb_hci_send 3 52138 NULL
109518 +sub_alloc_52140 sub_alloc 0 52140 NULL nohasharray
109519 +zram_meta_alloc_52140 zram_meta_alloc 1 52140 &sub_alloc_52140
109520 +hysdn_conf_write_52145 hysdn_conf_write 3 52145 NULL nohasharray
109521 +ext2_alloc_blocks_52145 ext2_alloc_blocks 2-0 52145 &hysdn_conf_write_52145
109522 +htable_size_52148 htable_size 0-1 52148 NULL
109523 +smk_write_load2_52155 smk_write_load2 3 52155 NULL
109524 +ieee80211_if_read_dot11MeshRetryTimeout_52168 ieee80211_if_read_dot11MeshRetryTimeout 3 52168 NULL
109525 +mga_compat_ioctl_52170 mga_compat_ioctl 2 52170 NULL
109526 +print_prefix_52176 print_prefix 0 52176 NULL
109527 +proc_pid_readlink_52186 proc_pid_readlink 3 52186 NULL
109528 +vmci_qp_broker_alloc_52216 vmci_qp_broker_alloc 6-5 52216 NULL
109529 +do_dmabuf_dirty_ldu_52241 do_dmabuf_dirty_ldu 6 52241 NULL
109530 +fuse_request_alloc_52243 fuse_request_alloc 1 52243 NULL nohasharray
109531 +xfs_iomap_eof_align_last_fsb_52243 xfs_iomap_eof_align_last_fsb 3 52243 &fuse_request_alloc_52243
109532 +ocfs2_try_to_merge_extent_52244 ocfs2_try_to_merge_extent 0 52244 NULL
109533 +pm80x_request_irq_52250 pm80x_request_irq 2 52250 NULL
109534 +mdiobus_alloc_size_52259 mdiobus_alloc_size 1 52259 NULL
109535 +shrink_slab_52261 shrink_slab 2-3 52261 NULL
109536 +hva_to_pfn_slow_52262 hva_to_pfn_slow 1 52262 NULL
109537 +sisusbcon_do_font_op_52271 sisusbcon_do_font_op 9 52271 NULL
109538 +handle_supp_msgs_52284 handle_supp_msgs 4 52284 NULL
109539 +kobject_set_name_vargs_52309 kobject_set_name_vargs 0 52309 NULL
109540 +read_file_reset_52310 read_file_reset 3 52310 NULL
109541 +request_asymmetric_key_52317 request_asymmetric_key 4-2 52317 NULL
109542 +hwflags_read_52318 hwflags_read 3 52318 NULL
109543 +snd_pcm_hw_free_52327 snd_pcm_hw_free 0 52327 NULL
109544 +ntfs_rl_split_52328 ntfs_rl_split 4-2 52328 NULL
109545 +test_unaligned_bulk_52333 test_unaligned_bulk 3 52333 NULL
109546 +hur_len_52339 hur_len 0 52339 NULL
109547 +bytes_to_frames_52362 bytes_to_frames 0-2 52362 NULL
109548 +copy_entries_to_user_52367 copy_entries_to_user 1 52367 NULL
109549 +iwl_dump_fh_52371 iwl_dump_fh 0 52371 NULL
109550 +hfsplus_find_attr_52374 hfsplus_find_attr 0 52374 NULL
109551 +ocfs2_journal_access_eb_52377 ocfs2_journal_access_eb 0 52377 NULL
109552 +mq_emit_config_values_52378 mq_emit_config_values 3 52378 NULL
109553 +isdn_writebuf_stub_52383 isdn_writebuf_stub 4 52383 NULL
109554 +jfs_setxattr_52389 jfs_setxattr 4 52389 NULL
109555 +aer_inject_write_52399 aer_inject_write 3 52399 NULL nohasharray
109556 +cl_page_own_52399 cl_page_own 0 52399 &aer_inject_write_52399
109557 +cgroup_file_write_52417 cgroup_file_write 3 52417 NULL
109558 +line6_midibuf_init_52425 line6_midibuf_init 2 52425 NULL
109559 +hso_serial_common_create_52428 hso_serial_common_create 4 52428 NULL
109560 +delay_status_52431 delay_status 5 52431 NULL
109561 +ath6kl_delete_qos_write_52435 ath6kl_delete_qos_write 3 52435 NULL
109562 +ieee80211_if_fmt_num_sta_ps_52438 ieee80211_if_fmt_num_sta_ps 3 52438 NULL
109563 +acpi_nvs_for_each_region_52448 acpi_nvs_for_each_region 0 52448 NULL
109564 +alauda_read_data_52452 alauda_read_data 3 52452 NULL
109565 +ieee80211_alloc_txb_52477 ieee80211_alloc_txb 1 52477 NULL
109566 +usb_tranzport_write_52479 usb_tranzport_write 3 52479 NULL
109567 +ocfs2_extend_no_holes_52483 ocfs2_extend_no_holes 3-4 52483 NULL
109568 +fd_do_rw_52495 fd_do_rw 3 52495 NULL
109569 +qib_user_sdma_pin_pages_52498 qib_user_sdma_pin_pages 3-5-4 52498 NULL
109570 +int_tasklet_entry_52500 int_tasklet_entry 3 52500 NULL
109571 +lmv_get_easize_52504 lmv_get_easize 0 52504 NULL
109572 +pm_qos_power_write_52513 pm_qos_power_write 3 52513 NULL
109573 +bt_sock_stream_recvmsg_52518 bt_sock_stream_recvmsg 4 52518 NULL
109574 +dup_variable_bug_52525 dup_variable_bug 3 52525 NULL
109575 +raw_recvmsg_52529 raw_recvmsg 4 52529 NULL
109576 +from_oblock_52546 from_oblock 0-1 52546 NULL
109577 +dccpprobe_read_52549 dccpprobe_read 3 52549 NULL
109578 +ocfs2_make_right_split_rec_52562 ocfs2_make_right_split_rec 3 52562 NULL
109579 +debug_level_proc_write_52572 debug_level_proc_write 3 52572 NULL
109580 +isku_sysfs_read_macro_52587 isku_sysfs_read_macro 6 52587 NULL
109581 +snd_pcm_sw_params_52594 snd_pcm_sw_params 0 52594 NULL
109582 +SyS_setsockopt_52610 SyS_setsockopt 5 52610 NULL
109583 +ll_sa_entry_alloc_52611 ll_sa_entry_alloc 4 52611 NULL
109584 +affs_set_blocksize_52625 affs_set_blocksize 2 52625 NULL
109585 +tps80031_writes_52638 tps80031_writes 3-4 52638 NULL
109586 +brcmf_sdio_assert_info_52653 brcmf_sdio_assert_info 4 52653 NULL
109587 +perf_trace_sched_stat_template_52656 perf_trace_sched_stat_template 3 52656 NULL
109588 +nvme_queue_extra_52661 nvme_queue_extra 0-1 52661 NULL
109589 +htb_add_class_to_row_52663 htb_add_class_to_row 3 52663 NULL
109590 +posix_acl_equiv_mode_52666 posix_acl_equiv_mode 0 52666 NULL
109591 +SYSC_gethostname_52677 SYSC_gethostname 2 52677 NULL
109592 +ntfs_get_nr_significant_bytes_52688 ntfs_get_nr_significant_bytes 0 52688 NULL
109593 +nvd0_disp_pioc_create__52693 nvd0_disp_pioc_create_ 5 52693 NULL
109594 +vendorextnWriteSection_52698 vendorextnWriteSection 0 52698 NULL
109595 +nouveau_client_create__52715 nouveau_client_create_ 5 52715 NULL
109596 +__dm_stat_bio_52722 __dm_stat_bio 3 52722 NULL
109597 +cx25840_ir_rx_read_52724 cx25840_ir_rx_read 3 52724 NULL
109598 +blkcipher_next_slow_52733 blkcipher_next_slow 4-3 52733 NULL
109599 +relay_alloc_page_array_52735 relay_alloc_page_array 1 52735 NULL
109600 +hfcsusb_rx_frame_52745 hfcsusb_rx_frame 3 52745 NULL
109601 +carl9170_debugfs_vif_dump_read_52755 carl9170_debugfs_vif_dump_read 3 52755 NULL
109602 +ieee80211_if_read_beacon_timeout_52756 ieee80211_if_read_beacon_timeout 3 52756 NULL
109603 +radeon_get_ib_value_52757 radeon_get_ib_value 0 52757 NULL
109604 +nvme_trans_ext_inq_page_52776 nvme_trans_ext_inq_page 3 52776 NULL
109605 +pwr_rcvd_beacons_read_52836 pwr_rcvd_beacons_read 3 52836 NULL
109606 +map_try_harder_52846 map_try_harder 2-3 52846 NULL
109607 +ext2_xattr_set_acl_52857 ext2_xattr_set_acl 4 52857 NULL
109608 +process_deleted_xattr_52861 process_deleted_xattr 0 52861 NULL
109609 +mon_bin_get_event_52863 mon_bin_get_event 6-4 52863 NULL
109610 +twl6030_gpadc_write_52867 twl6030_gpadc_write 1 52867 NULL
109611 +qib_decode_6120_err_52876 qib_decode_6120_err 3 52876 NULL
109612 +twlreg_write_52880 twlreg_write 3 52880 NULL
109613 +pvr2_ctrl_value_to_sym_internal_52881 pvr2_ctrl_value_to_sym_internal 5 52881 NULL
109614 +cache_read_procfs_52882 cache_read_procfs 3 52882 NULL
109615 +kvm_kvzalloc_52894 kvm_kvzalloc 1 52894 NULL
109616 +arizona_request_irq_52908 arizona_request_irq 2 52908 NULL
109617 +create_vtbl_52909 create_vtbl 0 52909 NULL
109618 +dio_bio_reap_52913 dio_bio_reap 0 52913 NULL
109619 +__kfifo_out_peek_r_52919 __kfifo_out_peek_r 3 52919 NULL
109620 +iblock_get_bio_52936 iblock_get_bio 3 52936 NULL
109621 +__nodes_remap_52951 __nodes_remap 5 52951 NULL
109622 +send_packet_52960 send_packet 4 52960 NULL
109623 +ieee80211_if_fmt_fwded_mcast_52961 ieee80211_if_fmt_fwded_mcast 3 52961 NULL
109624 +tx_tx_exch_read_52986 tx_tx_exch_read 3 52986 NULL
109625 +num_node_state_52989 num_node_state 0 52989 NULL
109626 +efivarfs_file_write_53000 efivarfs_file_write 3 53000 NULL
109627 +uasp_alloc_stream_res_53015 uasp_alloc_stream_res 0 53015 NULL
109628 +btrfs_free_and_pin_reserved_extent_53016 btrfs_free_and_pin_reserved_extent 2 53016 NULL
109629 +tx_tx_exch_pending_read_53018 tx_tx_exch_pending_read 3 53018 NULL
109630 +ext4_meta_bg_first_group_53031 ext4_meta_bg_first_group 0-2 53031 NULL
109631 +ocfs2_new_leaf_refcount_block_53036 ocfs2_new_leaf_refcount_block 0 53036 NULL
109632 +bio_cur_bytes_53037 bio_cur_bytes 0 53037 NULL
109633 +regcache_lzo_block_count_53056 regcache_lzo_block_count 0 53056 NULL
109634 +cfi_read_query_53066 cfi_read_query 0 53066 NULL
109635 +iwl_dbgfs_interrupt_write_53069 iwl_dbgfs_interrupt_write 3 53069 NULL
109636 +mwifiex_debug_read_53074 mwifiex_debug_read 3 53074 NULL
109637 +page_to_nid_53085 page_to_nid 0 53085 NULL
109638 +qib_resize_cq_53090 qib_resize_cq 2 53090 NULL
109639 +insert_new_root_53097 insert_new_root 0 53097 NULL
109640 +verity_status_53120 verity_status 5 53120 NULL
109641 +__copy_from_user_ll_nocache_nozero_53123 __copy_from_user_ll_nocache_nozero 0-3 53123 NULL
109642 +brcmf_usb_dl_cmd_53130 brcmf_usb_dl_cmd 4 53130 NULL
109643 +btrfs_search_forward_53137 btrfs_search_forward 0 53137 NULL
109644 +ps_poll_ps_poll_max_ap_turn_read_53140 ps_poll_ps_poll_max_ap_turn_read 3 53140 NULL
109645 +ieee80211_bss_info_update_53170 ieee80211_bss_info_update 4 53170 NULL
109646 +btrfs_io_bio_alloc_53179 btrfs_io_bio_alloc 2 53179 NULL
109647 +clear_capture_buf_53192 clear_capture_buf 2 53192 NULL
109648 +SyS_init_module_53202 SyS_init_module 2 53202 NULL
109649 +mtdoops_erase_block_53206 mtdoops_erase_block 2 53206 NULL
109650 +fixup_user_fault_53210 fixup_user_fault 3 53210 NULL
109651 +tx_tx_start_data_read_53219 tx_tx_start_data_read 3 53219 NULL
109652 +ptlrpc_lprocfs_req_history_max_seq_write_53243 ptlrpc_lprocfs_req_history_max_seq_write 3 53243 NULL
109653 +hfsplus_xattr_set_posix_acl_53249 hfsplus_xattr_set_posix_acl 4 53249 NULL
109654 +xfs_trans_read_buf_map_53258 xfs_trans_read_buf_map 5 53258 NULL
109655 +wil_write_file_ssid_53266 wil_write_file_ssid 3 53266 NULL
109656 +btrfs_file_extent_num_bytes_53269 btrfs_file_extent_num_bytes 0 53269 NULL
109657 +tsk_fork_get_node_53271 tsk_fork_get_node 0 53271 NULL
109658 +btrfs_find_highest_objectid_53284 btrfs_find_highest_objectid 0 53284 NULL
109659 +setup_leaf_for_split_53312 setup_leaf_for_split 0 53312 NULL
109660 +ftrace_profile_write_53327 ftrace_profile_write 3 53327 NULL
109661 +setup_cluster_no_bitmap_53328 setup_cluster_no_bitmap 0 53328 NULL
109662 +gsm_control_reply_53333 gsm_control_reply 4 53333 NULL
109663 +vm_mmap_53339 vm_mmap 0 53339 NULL
109664 +vendorextnIoctl_53350 vendorextnIoctl 0 53350 NULL nohasharray
109665 +btrfs_find_all_roots_53350 btrfs_find_all_roots 0 53350 &vendorextnIoctl_53350
109666 +read_6120_creg32_53363 read_6120_creg32 0 53363 NULL
109667 +sock_setbindtodevice_53369 sock_setbindtodevice 3 53369 NULL
109668 +get_random_bytes_arch_53370 get_random_bytes_arch 2 53370 NULL
109669 +isolate_lru_page_53417 isolate_lru_page 0 53417 NULL
109670 +i915_gem_execbuffer_relocate_object_53435 i915_gem_execbuffer_relocate_object 0 53435 NULL
109671 +isr_cmd_cmplt_read_53439 isr_cmd_cmplt_read 3 53439 NULL
109672 +mwifiex_info_read_53447 mwifiex_info_read 3 53447 NULL
109673 +apei_exec_run_optional_53452 apei_exec_run_optional 0 53452 NULL
109674 +paging64_prefetch_gpte_53468 paging64_prefetch_gpte 4 53468 NULL
109675 +iowarrior_read_53483 iowarrior_read 3 53483 NULL
109676 +osd_req_write_kern_53486 osd_req_write_kern 5 53486 NULL
109677 +do_verify_xattr_datum_53499 do_verify_xattr_datum 0 53499 NULL
109678 +ext4_ext_grow_indepth_53503 ext4_ext_grow_indepth 0 53503 NULL
109679 +snd_pcm_format_physical_width_53505 snd_pcm_format_physical_width 0 53505 NULL
109680 +dbAllocNext_53506 dbAllocNext 0 53506 NULL
109681 +ocfs2_xattr_set_acl_53508 ocfs2_xattr_set_acl 4 53508 NULL
109682 +check_acl_53512 check_acl 0 53512 NULL
109683 +send_utimes_53516 send_utimes 0 53516 NULL
109684 +SYSC_bind_53582 SYSC_bind 3 53582 NULL
109685 +cifs_utf16_bytes_53593 cifs_utf16_bytes 0 53593 NULL
109686 +proc_uid_map_write_53596 proc_uid_map_write 3 53596 NULL
109687 +gfn_to_pfn_async_53597 gfn_to_pfn_async 2 53597 NULL
109688 +pfkey_recvmsg_53604 pfkey_recvmsg 4 53604 NULL
109689 +___alloc_bootmem_nopanic_53626 ___alloc_bootmem_nopanic 1 53626 NULL
109690 +xd_write_multiple_pages_53633 xd_write_multiple_pages 6-5 53633 NULL
109691 +stmpe_gpio_request_53634 stmpe_gpio_request 2 53634 NULL nohasharray
109692 +ccid_getsockopt_builtin_ccids_53634 ccid_getsockopt_builtin_ccids 2 53634 &stmpe_gpio_request_53634
109693 +nr_sendmsg_53656 nr_sendmsg 4 53656 NULL
109694 +fuse_fill_write_pages_53682 fuse_fill_write_pages 0-4 53682 NULL
109695 +v4l2_event_subscribe_53687 v4l2_event_subscribe 3 53687 NULL
109696 +bdev_logical_block_size_53690 bdev_logical_block_size 0 53690 NULL nohasharray
109697 +igb_alloc_q_vector_53690 igb_alloc_q_vector 6-4 53690 &bdev_logical_block_size_53690
109698 +find_overflow_devnum_53711 find_overflow_devnum 0 53711 NULL
109699 +bio_integrity_split_53714 bio_integrity_split 3 53714 NULL
109700 +__ocfs2_resv_find_window_53721 __ocfs2_resv_find_window 3 53721 NULL
109701 +__proc_debug_mb_53732 __proc_debug_mb 5 53732 NULL
109702 +wdm_write_53735 wdm_write 3 53735 NULL
109703 +ext3_try_to_allocate_with_rsv_53737 ext3_try_to_allocate_with_rsv 5-3-0 53737 NULL
109704 +da9052_disable_irq_53745 da9052_disable_irq 2 53745 NULL
109705 +lpfc_idiag_queacc_read_qe_53755 lpfc_idiag_queacc_read_qe 0-2 53755 NULL nohasharray
109706 +amdtp_out_stream_get_max_payload_53755 amdtp_out_stream_get_max_payload 0 53755 &lpfc_idiag_queacc_read_qe_53755
109707 +ext2_acl_count_53773 ext2_acl_count 0-1 53773 NULL
109708 +__kfifo_dma_in_prepare_r_53792 __kfifo_dma_in_prepare_r 4-5 53792 NULL
109709 +qp_alloc_host_work_53798 qp_alloc_host_work 5-3 53798 NULL
109710 +regmap_raw_write_53803 regmap_raw_write 2-4 53803 NULL
109711 +lpfc_idiag_ctlacc_read_reg_53809 lpfc_idiag_ctlacc_read_reg 0-3 53809 NULL
109712 +nls_nullsize_53815 nls_nullsize 0 53815 NULL
109713 +insert_pfn_53816 insert_pfn 3-0 53816 NULL
109714 +setup_data_read_53822 setup_data_read 3 53822 NULL
109715 +pms_read_53873 pms_read 3 53873 NULL
109716 +ieee80211_if_fmt_dropped_frames_congestion_53883 ieee80211_if_fmt_dropped_frames_congestion 3 53883 NULL
109717 +ocfs2_rm_xattr_cluster_53900 ocfs2_rm_xattr_cluster 4-5-3 53900 NULL nohasharray
109718 +SyS_setgroups_53900 SyS_setgroups 1 53900 &ocfs2_rm_xattr_cluster_53900
109719 +azx_via_get_position_53916 azx_via_get_position 0 53916 NULL
109720 +usb_serial_generic_write_53927 usb_serial_generic_write 4 53927 NULL
109721 +ocfs2_make_clusters_writable_53938 ocfs2_make_clusters_writable 5-4-0 53938 NULL
109722 +idetape_chrdev_write_53976 idetape_chrdev_write 3 53976 NULL
109723 +mthca_reg_user_mr_53980 mthca_reg_user_mr 2-3 53980 NULL
109724 +__ocfs2_xattr_set_value_outside_53981 __ocfs2_xattr_set_value_outside 5 53981 NULL
109725 +ieee80211_if_fmt_dot11MeshHWMPperrMinInterval_53998 ieee80211_if_fmt_dot11MeshHWMPperrMinInterval 3 53998 NULL
109726 +send_clone_54011 send_clone 0 54011 NULL
109727 +hfsplus_attr_build_key_54013 hfsplus_attr_build_key 0 54013 NULL
109728 +snd_pcm_lib_write_transfer_54018 snd_pcm_lib_write_transfer 5-2-4 54018 NULL
109729 +mdc_kuc_write_54019 mdc_kuc_write 3 54019 NULL
109730 +ipxrtr_route_packet_54036 ipxrtr_route_packet 4 54036 NULL
109731 +pipeline_dec_packet_out_read_54052 pipeline_dec_packet_out_read 3 54052 NULL
109732 +nl80211_send_disconnected_54056 nl80211_send_disconnected 5 54056 NULL
109733 +rproc_state_read_54057 rproc_state_read 3 54057 NULL
109734 +btrfs_start_transaction_54066 btrfs_start_transaction 2 54066 NULL
109735 +_malloc_54077 _malloc 1 54077 NULL
109736 +bitmap_bitremap_54096 bitmap_bitremap 4 54096 NULL
109737 +altera_set_ir_pre_54103 altera_set_ir_pre 2 54103 NULL nohasharray
109738 +lustre_posix_acl_xattr_filter_54103 lustre_posix_acl_xattr_filter 2 54103 &altera_set_ir_pre_54103
109739 +__comedi_buf_write_alloc_54112 __comedi_buf_write_alloc 0-2 54112 NULL
109740 +strn_len_54122 strn_len 0 54122 NULL
109741 +isku_receive_54130 isku_receive 4 54130 NULL
109742 +isr_host_acknowledges_read_54136 isr_host_acknowledges_read 3 54136 NULL
109743 +irq_blk_threshold_write_54138 irq_blk_threshold_write 3 54138 NULL
109744 +memcpy_toiovec_54166 memcpy_toiovec 3 54166 NULL
109745 +nouveau_falcon_create__54169 nouveau_falcon_create_ 8 54169 NULL
109746 +p9_client_prepare_req_54175 p9_client_prepare_req 3 54175 NULL
109747 +devm_request_threaded_irq_54215 devm_request_threaded_irq 0 54215 NULL
109748 +do_sys_poll_54221 do_sys_poll 2 54221 NULL
109749 +__register_chrdev_54223 __register_chrdev 2-3 54223 NULL
109750 +pi_read_regr_54231 pi_read_regr 0 54231 NULL
109751 +mcp23s08_read_regs_54246 mcp23s08_read_regs 4 54246 NULL
109752 +reada_add_block_54247 reada_add_block 2 54247 NULL
109753 +write_file_spec_scan_ctl_54248 write_file_spec_scan_ctl 3 54248 NULL
109754 +jbd2__journal_restart_54249 jbd2__journal_restart 0 54249 NULL
109755 +barrier_all_devices_54254 barrier_all_devices 0 54254 NULL nohasharray
109756 +xfs_dir2_sf_addname_hard_54254 xfs_dir2_sf_addname_hard 3 54254 &barrier_all_devices_54254
109757 +ceph_msgpool_get_54258 ceph_msgpool_get 2 54258 NULL
109758 +wusb_prf_54261 wusb_prf 7 54261 NULL nohasharray
109759 +audio_write_54261 audio_write 4 54261 &wusb_prf_54261
109760 +mwifiex_getlog_read_54269 mwifiex_getlog_read 3 54269 NULL
109761 +kstrtou16_from_user_54274 kstrtou16_from_user 2 54274 NULL
109762 +ubi_calc_data_len_54279 ubi_calc_data_len 0-3 54279 NULL
109763 +altera_set_dr_post_54291 altera_set_dr_post 2 54291 NULL
109764 +dlm_alloc_pagevec_54296 dlm_alloc_pagevec 1 54296 NULL
109765 +get_iovec_page_array_54298 get_iovec_page_array 6 54298 NULL
109766 +ttm_mem_global_alloc_54299 ttm_mem_global_alloc 0 54299 NULL
109767 +reclaim_pages_54301 reclaim_pages 3 54301 NULL
109768 +sprintf_54306 sprintf 0 54306 NULL
109769 +bio_add_pc_page_54319 bio_add_pc_page 4 54319 NULL
109770 +br_fdb_fillbuf_54339 br_fdb_fillbuf 0 54339 NULL
109771 +__alloc_dev_table_54343 __alloc_dev_table 2 54343 NULL
109772 +__get_free_pages_54352 __get_free_pages 0 54352 NULL
109773 +tcf_hash_create_54360 tcf_hash_create 4 54360 NULL
109774 +read_file_credit_dist_stats_54367 read_file_credit_dist_stats 3 54367 NULL
109775 +vfs_readlink_54368 vfs_readlink 3 54368 NULL
109776 +do_dccp_setsockopt_54377 do_dccp_setsockopt 5 54377 NULL nohasharray
109777 +intel_sdvo_write_cmd_54377 intel_sdvo_write_cmd 4 54377 &do_dccp_setsockopt_54377
109778 +ah_alloc_tmp_54378 ah_alloc_tmp 2-3 54378 NULL
109779 +gart_unmap_page_54379 gart_unmap_page 3-2 54379 NULL
109780 +snd_pcm_oss_read2_54387 snd_pcm_oss_read2 3-0 54387 NULL
109781 +iwl_dbgfs_power_save_status_read_54392 iwl_dbgfs_power_save_status_read 3 54392 NULL
109782 +efx_nic_describe_stats_54407 efx_nic_describe_stats 2 54407 NULL
109783 +ll_ra_count_get_54410 ll_ra_count_get 3 54410 NULL
109784 +copy_gadget_strings_54417 copy_gadget_strings 3-2 54417 NULL
109785 +btrfs_inc_extent_ref_54442 btrfs_inc_extent_ref 0 54442 NULL
109786 +sparse_early_mem_maps_alloc_node_54485 sparse_early_mem_maps_alloc_node 4 54485 NULL
109787 +simple_strtoull_54493 simple_strtoull 0 54493 NULL
109788 +swiotlb_tbl_map_single_54495 swiotlb_tbl_map_single 0 54495 NULL
109789 +btrfs_ordered_sum_size_54509 btrfs_ordered_sum_size 0-2 54509 NULL
109790 +cgroup_write_X64_54514 cgroup_write_X64 5 54514 NULL
109791 +rfc4106_set_key_54519 rfc4106_set_key 3 54519 NULL
109792 +vmci_transport_dgram_enqueue_54525 vmci_transport_dgram_enqueue 4 54525 NULL
109793 +viacam_read_54526 viacam_read 3 54526 NULL
109794 +unix_dgram_connect_54535 unix_dgram_connect 3 54535 NULL
109795 +setsockopt_54539 setsockopt 5 54539 NULL
109796 +i915_reset_gen7_sol_offsets_54547 i915_reset_gen7_sol_offsets 0 54547 NULL
109797 +lbs_lowsnr_write_54549 lbs_lowsnr_write 3 54549 NULL
109798 +ntfs_commit_pages_after_non_resident_write_54555 ntfs_commit_pages_after_non_resident_write 4-3 54555 NULL nohasharray
109799 +i915_gem_get_seqno_54555 i915_gem_get_seqno 0 54555 &ntfs_commit_pages_after_non_resident_write_54555
109800 +btrfs_update_inode_item_54561 btrfs_update_inode_item 0 54561 NULL nohasharray
109801 +SYSC_setsockopt_54561 SYSC_setsockopt 5 54561 &btrfs_update_inode_item_54561
109802 +nfsd_vfs_write_54577 nfsd_vfs_write 6 54577 NULL
109803 +fw_iso_buffer_init_54582 fw_iso_buffer_init 3 54582 NULL
109804 +nvme_npages_54601 nvme_npages 0-1 54601 NULL
109805 +irq_pkt_threshold_write_54605 irq_pkt_threshold_write 3 54605 NULL
109806 +devm_gen_pool_create_54607 devm_gen_pool_create 3 54607 NULL
109807 +port_fops_write_54627 port_fops_write 3 54627 NULL
109808 +irq_timeout_read_54653 irq_timeout_read 3 54653 NULL
109809 +dns_resolver_read_54658 dns_resolver_read 3 54658 NULL
109810 +twl6030_interrupt_mask_54659 twl6030_interrupt_mask 2 54659 NULL
109811 +kvm_read_cr3_54662 kvm_read_cr3 0 54662 NULL
109812 +tdp_page_fault_54663 tdp_page_fault 2 54663 NULL
109813 +bus_add_device_54665 bus_add_device 0 54665 NULL
109814 +cw1200_queue_stats_init_54670 cw1200_queue_stats_init 2 54670 NULL
109815 +bio_kmalloc_54672 bio_kmalloc 2 54672 NULL
109816 +vring_new_virtqueue_54673 vring_new_virtqueue 2 54673 NULL
109817 +evm_read_key_54674 evm_read_key 3 54674 NULL
109818 +fs_path_add_path_54680 fs_path_add_path 0 54680 NULL
109819 +platform_get_irq_byname_54700 platform_get_irq_byname 0 54700 NULL
109820 +__btrfs_inc_extent_ref_54706 __btrfs_inc_extent_ref 7-0 54706 NULL
109821 +rfkill_fop_read_54711 rfkill_fop_read 3 54711 NULL
109822 +clk_divider_set_rate_54726 clk_divider_set_rate 3-2 54726 NULL
109823 +ocfs2_control_write_54737 ocfs2_control_write 3 54737 NULL
109824 +kzalloc_54740 kzalloc 1 54740 NULL
109825 +wep_iv_read_54744 wep_iv_read 3 54744 NULL
109826 +lpfc_idiag_pcicfg_write_54749 lpfc_idiag_pcicfg_write 3 54749 NULL
109827 +iio_event_chrdev_read_54757 iio_event_chrdev_read 3 54757 NULL
109828 +adis16480_show_firmware_date_54762 adis16480_show_firmware_date 3 54762 NULL
109829 +ldsem_atomic_update_54774 ldsem_atomic_update 1-0 54774 NULL
109830 +gpiochip_add_54781 gpiochip_add 0 54781 NULL
109831 +flexcop_device_kmalloc_54793 flexcop_device_kmalloc 1 54793 NULL
109832 +domain_init_54797 domain_init 2 54797 NULL
109833 +ext3_find_goal_54801 ext3_find_goal 0 54801 NULL
109834 +get_dev_size_54807 get_dev_size 0 54807 NULL
109835 +nfsd_write_54809 nfsd_write 6 54809 NULL
109836 +aes_decrypt_fail_read_54815 aes_decrypt_fail_read 3 54815 NULL nohasharray
109837 +kvzalloc_54815 kvzalloc 1 54815 &aes_decrypt_fail_read_54815 nohasharray
109838 +crypto_tfm_ctx_alignment_54815 crypto_tfm_ctx_alignment 0 54815 &kvzalloc_54815
109839 +generic_perform_write_54832 generic_perform_write 3 54832 NULL
109840 +write_rio_54837 write_rio 3 54837 NULL
109841 +nouveau_engctx_create__54839 nouveau_engctx_create_ 8 54839 NULL nohasharray
109842 +ext3_acl_from_disk_54839 ext3_acl_from_disk 2 54839 &nouveau_engctx_create__54839
109843 +ufx_ops_write_54848 ufx_ops_write 3 54848 NULL
109844 +printer_read_54851 printer_read 3 54851 NULL
109845 +qib_reg_user_mr_54858 qib_reg_user_mr 2-3 54858 NULL
109846 +alloc_ep_req_54860 alloc_ep_req 2 54860 NULL
109847 +broadsheet_spiflash_rewrite_sector_54864 broadsheet_spiflash_rewrite_sector 2 54864 NULL
109848 +prism_build_supp_rates_54865 prism_build_supp_rates 0 54865 NULL
109849 +iscsi_pool_init_54913 iscsi_pool_init 4-2 54913 NULL nohasharray
109850 +kobject_set_name_vargs_54913 kobject_set_name_vargs 0 54913 &iscsi_pool_init_54913
109851 +btrfs_stack_chunk_num_stripes_54923 btrfs_stack_chunk_num_stripes 0 54923 NULL
109852 +bio_add_page_54933 bio_add_page 0-3 54933 NULL
109853 +mxms_structlen_54939 mxms_structlen 0 54939 NULL
109854 +add_port_54941 add_port 2 54941 NULL
109855 +ath9k_dump_btcoex_54949 ath9k_dump_btcoex 0 54949 NULL
109856 +alauda_write_data_54967 alauda_write_data 3 54967 NULL
109857 +c4_add_card_54968 c4_add_card 3 54968 NULL
109858 +iwl_pcie_dump_fh_54975 iwl_pcie_dump_fh 0 54975 NULL
109859 +ubi_change_vtbl_record_54979 ubi_change_vtbl_record 0 54979 NULL
109860 +ext3_xattr_get_54989 ext3_xattr_get 0 54989 NULL
109861 +rds_ib_inc_copy_to_user_55007 rds_ib_inc_copy_to_user 3 55007 NULL
109862 +mem_cgroup_get_lru_size_55008 mem_cgroup_get_lru_size 0 55008 NULL
109863 +cx231xx_v4l2_read_55014 cx231xx_v4l2_read 3 55014 NULL
109864 +paging32_get_level1_sp_gpa_55022 paging32_get_level1_sp_gpa 0 55022 NULL
109865 +error_error_null_Frame_tx_start_read_55024 error_error_null_Frame_tx_start_read 3 55024 NULL
109866 +dgap_do_bios_load_55025 dgap_do_bios_load 3 55025 NULL
109867 +btrfs_init_acl_55028 btrfs_init_acl 0 55028 NULL
109868 +write_ctree_super_55039 write_ctree_super 0 55039 NULL
109869 +ext4_ext_handle_uninitialized_extents_55059 ext4_ext_handle_uninitialized_extents 0-6 55059 NULL
109870 +apei_exec_run_55075 apei_exec_run 0 55075 NULL
109871 +bitmap_storage_alloc_55077 bitmap_storage_alloc 2 55077 NULL
109872 +snd_pcm_capture_hw_avail_55086 snd_pcm_capture_hw_avail 0 55086 NULL nohasharray
109873 +read_dma_55086 read_dma 3 55086 &snd_pcm_capture_hw_avail_55086
109874 +rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read_55106 rxpipe_beacon_buffer_thres_host_int_trig_rx_data_read 3 55106 NULL
109875 +corrupt_data_55120 corrupt_data 0-3 55120 NULL
109876 +crypto_ahash_setkey_55134 crypto_ahash_setkey 3 55134 NULL
109877 +ocfs2_prepare_refcount_change_for_del_55137 ocfs2_prepare_refcount_change_for_del 3-0 55137 NULL nohasharray
109878 +filldir_55137 filldir 3 55137 &ocfs2_prepare_refcount_change_for_del_55137
109879 +validate_vid_hdr_55145 validate_vid_hdr 0 55145 NULL
109880 +ocfs2_truncate_file_55148 ocfs2_truncate_file 3 55148 NULL
109881 +npages_to_npools_55149 npages_to_npools 0-1 55149 NULL
109882 +ieee80211_if_read_uapsd_queues_55150 ieee80211_if_read_uapsd_queues 3 55150 NULL
109883 +mtd_get_fact_prot_info_55186 mtd_get_fact_prot_info 0 55186 NULL
109884 +sel_write_relabel_55195 sel_write_relabel 3 55195 NULL
109885 +sched_feat_write_55202 sched_feat_write 3 55202 NULL
109886 +ht40allow_map_read_55209 ht40allow_map_read 3 55209 NULL
109887 +__kfifo_dma_out_prepare_r_55211 __kfifo_dma_out_prepare_r 4-5 55211 NULL
109888 +do_raw_setsockopt_55215 do_raw_setsockopt 5 55215 NULL
109889 +qxl_alloc_client_monitors_config_55216 qxl_alloc_client_monitors_config 2 55216 NULL
109890 +dbAllocDmap_55227 dbAllocDmap 0 55227 NULL
109891 +hash_netport6_expire_55232 hash_netport6_expire 3 55232 NULL
109892 +memcpy_fromiovec_55247 memcpy_fromiovec 3 55247 NULL
109893 +lbs_failcount_write_55276 lbs_failcount_write 3 55276 NULL
109894 +persistent_ram_new_55286 persistent_ram_new 2-1 55286 NULL
109895 +ptrace_request_55288 ptrace_request 3 55288 NULL
109896 +rx_streaming_interval_read_55291 rx_streaming_interval_read 3 55291 NULL
109897 +lov_get_stripecnt_55297 lov_get_stripecnt 0-3 55297 NULL
109898 +gsm_control_modem_55303 gsm_control_modem 3 55303 NULL
109899 +wimax_msg_len_55304 wimax_msg_len 0 55304 NULL
109900 +__get_vm_area_node_55305 __get_vm_area_node 6 55305 NULL nohasharray
109901 +qp_alloc_guest_work_55305 qp_alloc_guest_work 5-3 55305 &__get_vm_area_node_55305
109902 +__vxge_hw_vpath_initialize_55328 __vxge_hw_vpath_initialize 2 55328 NULL
109903 +do_shmat_55336 do_shmat 5 55336 NULL
109904 +vme_user_read_55338 vme_user_read 3 55338 NULL
109905 +sctp_datamsg_from_user_55342 sctp_datamsg_from_user 4 55342 NULL nohasharray
109906 +__wa_xfer_setup_sizes_55342 __wa_xfer_setup_sizes 0 55342 &sctp_datamsg_from_user_55342
109907 +iterate_inode_extrefs_55362 iterate_inode_extrefs 0 55362 NULL nohasharray
109908 +acpi_system_read_event_55362 acpi_system_read_event 3 55362 &iterate_inode_extrefs_55362
109909 +cw1200_sdio_align_size_55391 cw1200_sdio_align_size 2 55391 NULL
109910 +__domain_mapping_55393 __domain_mapping 5 55393 NULL
109911 +iwl_dbgfs_plcp_delta_read_55407 iwl_dbgfs_plcp_delta_read 3 55407 NULL
109912 +si476x_radio_read_rds_blckcnt_blob_55427 si476x_radio_read_rds_blckcnt_blob 3 55427 NULL
109913 +SyS_read_55449 SyS_read 3 55449 NULL
109914 +add_relation_rb_55451 add_relation_rb 0 55451 NULL
109915 +__vxge_hw_channel_allocate_55462 __vxge_hw_channel_allocate 3 55462 NULL
109916 +cx23888_ir_rx_read_55473 cx23888_ir_rx_read 3 55473 NULL
109917 +snd_pcm_lib_write_55483 snd_pcm_lib_write 0-3 55483 NULL
109918 +i2o_pool_alloc_55485 i2o_pool_alloc 4 55485 NULL
109919 +ocfs2_rec_clusters_55501 ocfs2_rec_clusters 0 55501 NULL nohasharray
109920 +ras_stride_increase_window_55501 ras_stride_increase_window 3 55501 &ocfs2_rec_clusters_55501
109921 +ext4_flex_bg_size_55502 ext4_flex_bg_size 0 55502 NULL
109922 +tx_tx_done_int_template_read_55511 tx_tx_done_int_template_read 3 55511 NULL
109923 +ea_get_55522 ea_get 3-0 55522 NULL
109924 +buffer_size_55534 buffer_size 0 55534 NULL
109925 +set_msr_interception_55538 set_msr_interception 2 55538 NULL
109926 +tty_port_register_device_55543 tty_port_register_device 3 55543 NULL
109927 +dgap_do_config_load_55548 dgap_do_config_load 2 55548 NULL
109928 +dm_stats_list_55551 dm_stats_list 4 55551 NULL
109929 +gfs2_bitfit_55566 gfs2_bitfit 2-3-0 55566 NULL
109930 +hash_netport4_expire_55584 hash_netport4_expire 3 55584 NULL
109931 +add_partition_55588 add_partition 2 55588 NULL
109932 +kstrtou8_from_user_55599 kstrtou8_from_user 2 55599 NULL
109933 +SyS_keyctl_55602 SyS_keyctl 4 55602 NULL
109934 +allocate_without_wrapping_disk_55607 allocate_without_wrapping_disk 4 55607 NULL
109935 +macvtap_put_user_55609 macvtap_put_user 4 55609 NULL
109936 +selinux_setprocattr_55611 selinux_setprocattr 4 55611 NULL
109937 +edge_tty_recv_55622 edge_tty_recv 3 55622 NULL
109938 +snd_pcm_hw_param_last_55624 snd_pcm_hw_param_last 0 55624 NULL
109939 +reiserfs_xattr_get_55628 reiserfs_xattr_get 0 55628 NULL nohasharray
109940 +pktgen_if_write_55628 pktgen_if_write 3 55628 &reiserfs_xattr_get_55628
109941 +osc_obd_max_pages_per_rpc_seq_write_55636 osc_obd_max_pages_per_rpc_seq_write 3 55636 NULL
109942 +dvb_dmxdev_set_buffer_size_55643 dvb_dmxdev_set_buffer_size 2 55643 NULL
109943 +mlx4_buddy_alloc_55647 mlx4_buddy_alloc 2 55647 NULL
109944 +xfs_bmbt_maxrecs_55649 xfs_bmbt_maxrecs 0-2 55649 NULL
109945 +lpfc_idiag_queinfo_read_55662 lpfc_idiag_queinfo_read 3 55662 NULL
109946 +il_dbgfs_tx_queue_read_55668 il_dbgfs_tx_queue_read 3 55668 NULL
109947 +btrfs_reloc_cow_block_55672 btrfs_reloc_cow_block 0 55672 NULL
109948 +get_info_55681 get_info 3 55681 NULL
109949 +iwl_dbgfs_plcp_delta_write_55682 iwl_dbgfs_plcp_delta_write 3 55682 NULL
109950 +pm8001_store_update_fw_55716 pm8001_store_update_fw 4 55716 NULL
109951 +ocfs2_lock_refcount_tree_55719 ocfs2_lock_refcount_tree 0 55719 NULL nohasharray
109952 +mtdswap_init_55719 mtdswap_init 2 55719 &ocfs2_lock_refcount_tree_55719
109953 +tap_pwup_write_55723 tap_pwup_write 3 55723 NULL
109954 +__iio_allocate_kfifo_55738 __iio_allocate_kfifo 2 55738 NULL
109955 +set_local_name_55757 set_local_name 4 55757 NULL
109956 +strlen_55778 strlen 0 55778 NULL nohasharray
109957 +is_idx_node_in_tnc_55778 is_idx_node_in_tnc 0 55778 &strlen_55778
109958 +set_spte_55783 set_spte 4-5 55783 NULL
109959 +req_bio_endio_55786 req_bio_endio 3 55786 NULL nohasharray
109960 +conf_read_55786 conf_read 3 55786 &req_bio_endio_55786
109961 +uwb_rc_neh_grok_event_55799 uwb_rc_neh_grok_event 3 55799 NULL
109962 +sb16_copy_from_user_55836 sb16_copy_from_user 7-6-10 55836 NULL
109963 +ept_gva_to_gpa_55848 ept_gva_to_gpa 2 55848 NULL
109964 +ip_hdrlen_55849 ip_hdrlen 0 55849 NULL
109965 +hcd_alloc_coherent_55862 hcd_alloc_coherent 5 55862 NULL
109966 +shmem_setxattr_55867 shmem_setxattr 4 55867 NULL
109967 +__check_block_validity_55869 __check_block_validity 0 55869 NULL
109968 +hsc_write_55875 hsc_write 3 55875 NULL
109969 +pm_qos_power_read_55891 pm_qos_power_read 3 55891 NULL
109970 +snd_pcm_hw_param_value_min_55917 snd_pcm_hw_param_value_min 0 55917 NULL
109971 +ext2_direct_IO_55932 ext2_direct_IO 4 55932 NULL
109972 +paging64_page_fault_55942 paging64_page_fault 2 55942 NULL
109973 +kvm_write_guest_virt_system_55944 kvm_write_guest_virt_system 4-2 55944 NULL
109974 +sel_read_policy_55947 sel_read_policy 3 55947 NULL
109975 +ceph_get_direct_page_vector_55956 ceph_get_direct_page_vector 2 55956 NULL
109976 +simple_read_from_buffer_55957 simple_read_from_buffer 5-2 55957 NULL
109977 +tx_tx_imm_resp_read_55964 tx_tx_imm_resp_read 3 55964 NULL
109978 +btrfs_clone_55977 btrfs_clone 5-3 55977 NULL
109979 +wa_xfer_create_subset_sg_55992 wa_xfer_create_subset_sg 2-3 55992 NULL
109980 +nvme_alloc_iod_56027 nvme_alloc_iod 1-2 56027 NULL
109981 +dccp_sendmsg_56058 dccp_sendmsg 4 56058 NULL
109982 +__set_discard_56081 __set_discard 2 56081 NULL
109983 +pscsi_get_bio_56103 pscsi_get_bio 1 56103 NULL
109984 +usb_alloc_stream_buffers_56123 usb_alloc_stream_buffers 3 56123 NULL
109985 +sel_read_handle_status_56139 sel_read_handle_status 3 56139 NULL
109986 +write_file_frameerrors_56145 write_file_frameerrors 3 56145 NULL
109987 +add_extent_mapping_56157 add_extent_mapping 0 56157 NULL
109988 +__i2c_transfer_56162 __i2c_transfer 0 56162 NULL
109989 +rawv6_setsockopt_56165 rawv6_setsockopt 5 56165 NULL
109990 +create_irq_nr_56180 create_irq_nr 1-0-2 56180 NULL
109991 +ath9k_dump_legacy_btcoex_56194 ath9k_dump_legacy_btcoex 0 56194 NULL
109992 +ocfs2_journal_access_gd_56209 ocfs2_journal_access_gd 0 56209 NULL
109993 +update_space_info_56213 update_space_info 0 56213 NULL
109994 +ncp_read_bounce_size_56221 ncp_read_bounce_size 0-1 56221 NULL
109995 +vring_add_indirect_56222 vring_add_indirect 4 56222 NULL
109996 +ocfs2_find_xe_in_bucket_56224 ocfs2_find_xe_in_bucket 0 56224 NULL
109997 +do_ipt_set_ctl_56238 do_ipt_set_ctl 4 56238 NULL
109998 +fd_copyin_56247 fd_copyin 3 56247 NULL
109999 +sk_rmem_schedule_56255 sk_rmem_schedule 3 56255 NULL
110000 +p9pdu_vreadf_56271 p9pdu_vreadf 0 56271 NULL
110001 +il4965_ucode_general_stats_read_56277 il4965_ucode_general_stats_read 3 56277 NULL
110002 +ieee80211_if_fmt_user_power_level_56283 ieee80211_if_fmt_user_power_level 3 56283 NULL
110003 +RESIZE_IF_NEEDED_56286 RESIZE_IF_NEEDED 2 56286 NULL
110004 +dvb_aplay_56296 dvb_aplay 3 56296 NULL
110005 +btmrvl_hscfgcmd_read_56303 btmrvl_hscfgcmd_read 3 56303 NULL
110006 +speakup_file_write_56310 speakup_file_write 3 56310 NULL
110007 +pipeline_pre_to_defrag_swi_read_56321 pipeline_pre_to_defrag_swi_read 3 56321 NULL
110008 +journal_init_revoke_table_56331 journal_init_revoke_table 1 56331 NULL
110009 +snd_rawmidi_read_56337 snd_rawmidi_read 3 56337 NULL
110010 +ipv6_recv_error_56347 ipv6_recv_error 3 56347 NULL
110011 +vxge_os_dma_malloc_async_56348 vxge_os_dma_malloc_async 3 56348 NULL
110012 +mite_device_bytes_transferred_56355 mite_device_bytes_transferred 0 56355 NULL
110013 +iov_iter_copy_from_user_atomic_56368 iov_iter_copy_from_user_atomic 0-4 56368 NULL
110014 +dev_read_56369 dev_read 3 56369 NULL
110015 +ath10k_read_simulate_fw_crash_56371 ath10k_read_simulate_fw_crash 3 56371 NULL
110016 +ata_qc_complete_multiple_56376 ata_qc_complete_multiple 2 56376 NULL
110017 +snd_pcm_common_ioctl1_56382 snd_pcm_common_ioctl1 0 56382 NULL
110018 +write_gssp_56404 write_gssp 3 56404 NULL
110019 +ocfs2_control_read_56405 ocfs2_control_read 3 56405 NULL
110020 +do_get_write_access_56410 do_get_write_access 0 56410 NULL
110021 +i915_gem_object_sync_56417 i915_gem_object_sync 0 56417 NULL nohasharray
110022 +store_msg_56417 store_msg 3 56417 &i915_gem_object_sync_56417
110023 +pppol2tp_sendmsg_56420 pppol2tp_sendmsg 4 56420 NULL
110024 +fl_create_56435 fl_create 5 56435 NULL
110025 +gnttab_map_56439 gnttab_map 2 56439 NULL
110026 +cx231xx_init_isoc_56453 cx231xx_init_isoc 3-2-4 56453 NULL
110027 +set_connectable_56458 set_connectable 4 56458 NULL
110028 +osd_req_list_partition_objects_56464 osd_req_list_partition_objects 5 56464 NULL
110029 +putused_user_56467 putused_user 3 56467 NULL
110030 +lbs_rdmac_write_56471 lbs_rdmac_write 3 56471 NULL
110031 +calc_linear_pos_56472 calc_linear_pos 0-3 56472 NULL
110032 +global_rt_period_56476 global_rt_period 0 56476 NULL
110033 +crypto_shash_alignmask_56486 crypto_shash_alignmask 0 56486 NULL
110034 +ieee80211_rx_mgmt_probe_beacon_56491 ieee80211_rx_mgmt_probe_beacon 3 56491 NULL
110035 +cfs_access_process_vm_56503 cfs_access_process_vm 2-4-0 56503 NULL
110036 +init_map_ip_56508 init_map_ip 5 56508 NULL
110037 +lustre_posix_acl_xattr_reduce_space_56512 lustre_posix_acl_xattr_reduce_space 3 56512 NULL
110038 +ext4_zeroout_es_56514 ext4_zeroout_es 0 56514 NULL
110039 +cfg80211_connect_result_56515 cfg80211_connect_result 6-4 56515 NULL
110040 +ip_options_get_56538 ip_options_get 4 56538 NULL
110041 +ll_wr_track_id_56544 ll_wr_track_id 2 56544 NULL
110042 +vb2_queue_or_prepare_buf_56547 vb2_queue_or_prepare_buf 0 56547 NULL
110043 +ocfs2_change_extent_flag_56549 ocfs2_change_extent_flag 5 56549 NULL
110044 +alloc_apertures_56561 alloc_apertures 1 56561 NULL
110045 +rs_sta_dbgfs_stats_table_read_56573 rs_sta_dbgfs_stats_table_read 3 56573 NULL
110046 +portcntrs_2_read_56586 portcntrs_2_read 3 56586 NULL
110047 +event_filter_write_56609 event_filter_write 3 56609 NULL
110048 +nvme_trans_log_temperature_56613 nvme_trans_log_temperature 3 56613 NULL
110049 +gather_array_56641 gather_array 3 56641 NULL
110050 +lookup_extent_backref_56644 lookup_extent_backref 9-0 56644 NULL
110051 +uvc_debugfs_stats_read_56651 uvc_debugfs_stats_read 3 56651 NULL
110052 +tg3_nvram_write_block_56666 tg3_nvram_write_block 3 56666 NULL
110053 +btrfs_cow_block_56678 btrfs_cow_block 0 56678 NULL
110054 +snd_gus_dram_read_56686 snd_gus_dram_read 4 56686 NULL nohasharray
110055 +da9055_gpio_to_irq_56686 da9055_gpio_to_irq 2 56686 &snd_gus_dram_read_56686
110056 +build_map_info_56696 build_map_info 2 56696 NULL
110057 +dvb_ringbuffer_read_user_56702 dvb_ringbuffer_read_user 3-0 56702 NULL
110058 +sta_flags_read_56710 sta_flags_read 3 56710 NULL
110059 +ipv6_getsockopt_sticky_56711 ipv6_getsockopt_sticky 5 56711 NULL
110060 +__wa_xfer_setup_segs_56725 __wa_xfer_setup_segs 2 56725 NULL
110061 +__copy_from_user_ll_56738 __copy_from_user_ll 0-3 56738 NULL
110062 +pcpu_populate_chunk_56741 pcpu_populate_chunk 3-2 56741 NULL
110063 +drm_agp_bind_pages_56748 drm_agp_bind_pages 3 56748 NULL
110064 +btrfsic_map_block_56751 btrfsic_map_block 2 56751 NULL
110065 +alloc_iommu_56778 alloc_iommu 2-3-0 56778 NULL
110066 +hash_lookup_56792 hash_lookup 2 56792 NULL nohasharray
110067 +ttm_alloc_new_pages_56792 ttm_alloc_new_pages 5 56792 &hash_lookup_56792
110068 +do_syslog_56807 do_syslog 3 56807 NULL
110069 +ext4_ext_rm_idx_56827 ext4_ext_rm_idx 0 56827 NULL
110070 +mtdchar_write_56831 mtdchar_write 3 56831 NULL nohasharray
110071 +ntfs_rl_realloc_56831 ntfs_rl_realloc 3 56831 &mtdchar_write_56831
110072 +sysctl_sync_threshold_56835 sysctl_sync_threshold 0 56835 NULL
110073 +snd_rawmidi_kernel_write1_56847 snd_rawmidi_kernel_write1 4-0 56847 NULL
110074 +si476x_radio_read_agc_blob_56849 si476x_radio_read_agc_blob 3 56849 NULL
110075 +wb_lookup_56858 wb_lookup 2 56858 NULL
110076 +ext3_xattr_ibody_get_56880 ext3_xattr_ibody_get 0 56880 NULL
110077 +pvr2_debugifc_print_status_56890 pvr2_debugifc_print_status 3 56890 NULL
110078 +debug_debug3_read_56894 debug_debug3_read 3 56894 NULL
110079 +batadv_tt_update_changes_56895 batadv_tt_update_changes 3 56895 NULL
110080 +hfsplus_find_cat_56899 hfsplus_find_cat 0 56899 NULL
110081 +hfsplus_setxattr_56902 hfsplus_setxattr 4 56902 NULL
110082 +__bitmap_clear_bits_56912 __bitmap_clear_bits 3 56912 NULL
110083 +strcspn_56913 strcspn 0 56913 NULL
110084 +__kfifo_out_56927 __kfifo_out 0-3 56927 NULL
110085 +CopyBufferToControlPacket_56933 CopyBufferToControlPacket 0 56933 NULL nohasharray
110086 +journal_init_revoke_56933 journal_init_revoke 2 56933 &CopyBufferToControlPacket_56933
110087 +nouveau_xtensa_create__56952 nouveau_xtensa_create_ 8 56952 NULL
110088 +diva_get_driver_info_56967 diva_get_driver_info 0 56967 NULL
110089 +nouveau_device_create__56984 nouveau_device_create_ 6 56984 NULL
110090 +sptlrpc_secflags2str_56995 sptlrpc_secflags2str 3 56995 NULL
110091 +vlsi_alloc_ring_57003 vlsi_alloc_ring 3-4 57003 NULL
110092 +btrfs_super_csum_size_57004 btrfs_super_csum_size 0 57004 NULL
110093 +aircable_process_packet_57027 aircable_process_packet 4 57027 NULL
110094 +i915_gem_evict_everything_57038 i915_gem_evict_everything 0 57038 NULL
110095 +skb_network_offset_57043 skb_network_offset 0 57043 NULL nohasharray
110096 +ieee80211_if_fmt_state_57043 ieee80211_if_fmt_state 3 57043 &skb_network_offset_57043
110097 +bytes_to_samples_57049 bytes_to_samples 0-2 57049 NULL
110098 +xfs_buf_read_map_57053 xfs_buf_read_map 3 57053 NULL
110099 +__wl_get_peb_57058 __wl_get_peb 0 57058 NULL
110100 +cx2341x_ctrl_new_std_57061 cx2341x_ctrl_new_std 4 57061 NULL
110101 +sca3000_read_data_57064 sca3000_read_data 4 57064 NULL
110102 +pcmcia_replace_cis_57066 pcmcia_replace_cis 3 57066 NULL
110103 +tracing_set_trace_write_57096 tracing_set_trace_write 3 57096 NULL
110104 +altera_get_note_57099 altera_get_note 6 57099 NULL
110105 +hpfs_readpages_57106 hpfs_readpages 4 57106 NULL
110106 +snd_pcm_hw_params_old_user_57108 snd_pcm_hw_params_old_user 0 57108 NULL
110107 +crypto_compress_ctxsize_57109 crypto_compress_ctxsize 0 57109 NULL
110108 +sysfs_write_file_57116 sysfs_write_file 3 57116 NULL
110109 +cipso_v4_gentag_loc_57119 cipso_v4_gentag_loc 0 57119 NULL
110110 +nl80211_send_deauth_57136 nl80211_send_deauth 4 57136 NULL nohasharray
110111 +rds_ib_sub_signaled_57136 rds_ib_sub_signaled 2 57136 &nl80211_send_deauth_57136 nohasharray
110112 +ima_show_htable_value_57136 ima_show_htable_value 2 57136 &rds_ib_sub_signaled_57136
110113 +snd_sonicvibes_getdmac_57140 snd_sonicvibes_getdmac 0 57140 NULL
110114 +changed_inode_57156 changed_inode 0 57156 NULL
110115 +udl_prime_create_57159 udl_prime_create 2 57159 NULL
110116 +__ipath_get_user_pages_57166 __ipath_get_user_pages 1-2 57166 NULL
110117 +stk_prepare_sio_buffers_57168 stk_prepare_sio_buffers 2 57168 NULL
110118 +rx_hw_stuck_read_57179 rx_hw_stuck_read 3 57179 NULL
110119 +ocfs2_claim_metadata_57192 ocfs2_claim_metadata 0 57192 NULL
110120 +tt3650_ci_msg_57219 tt3650_ci_msg 4 57219 NULL
110121 +dma_fifo_alloc_57236 dma_fifo_alloc 5-3-2 57236 NULL
110122 +flush_space_57241 flush_space 3-0 57241 NULL
110123 +rsxx_cram_write_57244 rsxx_cram_write 3 57244 NULL
110124 +ieee80211_if_fmt_tsf_57249 ieee80211_if_fmt_tsf 3 57249 NULL
110125 +oprofilefs_ulong_from_user_57251 oprofilefs_ulong_from_user 3 57251 NULL
110126 +alloc_flex_gd_57259 alloc_flex_gd 1 57259 NULL
110127 +lbs_sleepparams_write_57283 lbs_sleepparams_write 3 57283 NULL
110128 +pstore_file_read_57288 pstore_file_read 3 57288 NULL
110129 +snd_pcm_read_57289 snd_pcm_read 3 57289 NULL
110130 +fw_file_size_57307 fw_file_size 0 57307 NULL
110131 +ftdi_elan_write_57309 ftdi_elan_write 3 57309 NULL
110132 +write_file_regval_57313 write_file_regval 3 57313 NULL
110133 +__mxt_write_reg_57326 __mxt_write_reg 3 57326 NULL
110134 +ocfs2_xattr_shrink_size_57328 ocfs2_xattr_shrink_size 3 57328 NULL
110135 +usblp_read_57342 usblp_read 3 57342 NULL
110136 +print_devstats_dot11RTSFailureCount_57347 print_devstats_dot11RTSFailureCount 3 57347 NULL
110137 +dio_send_cur_page_57348 dio_send_cur_page 0 57348 NULL
110138 +tipc_bclink_stats_57372 tipc_bclink_stats 2 57372 NULL
110139 +max8997_irq_domain_map_57375 max8997_irq_domain_map 2 57375 NULL
110140 +tty_register_device_attr_57381 tty_register_device_attr 2 57381 NULL
110141 +read_file_blob_57406 read_file_blob 3 57406 NULL
110142 +enclosure_register_57412 enclosure_register 3 57412 NULL
110143 +compat_keyctl_instantiate_key_iov_57431 compat_keyctl_instantiate_key_iov 3 57431 NULL
110144 +copy_to_user_fromio_57432 copy_to_user_fromio 3 57432 NULL
110145 +__roundup_pow_of_two_57461 __roundup_pow_of_two 0 57461 NULL
110146 +crypto_tfm_alg_blocksize_57463 crypto_tfm_alg_blocksize 0 57463 NULL
110147 +sisusb_clear_vram_57466 sisusb_clear_vram 2-3 57466 NULL
110148 +blk_flush_cur_seq_57467 blk_flush_cur_seq 0 57467 NULL
110149 +ieee80211_if_read_flags_57470 ieee80211_if_read_flags 3 57470 NULL nohasharray
110150 +sep_lock_user_pages_57470 sep_lock_user_pages 2-3 57470 &ieee80211_if_read_flags_57470
110151 +ocfs2_write_cluster_57483 ocfs2_write_cluster 9-8-2 57483 NULL
110152 +bnad_debugfs_write_regwr_57500 bnad_debugfs_write_regwr 3 57500 NULL
110153 +skb_headlen_57501 skb_headlen 0 57501 NULL
110154 +copy_in_user_57502 copy_in_user 3 57502 NULL
110155 +ckhdid_printf_57505 ckhdid_printf 2 57505 NULL
110156 +init_tag_map_57515 init_tag_map 3 57515 NULL
110157 +wil_read_file_ssid_57517 wil_read_file_ssid 3 57517 NULL nohasharray
110158 +il_dbgfs_force_reset_read_57517 il_dbgfs_force_reset_read 3 57517 &wil_read_file_ssid_57517 nohasharray
110159 +btrfs_insert_inode_extref_57517 btrfs_insert_inode_extref 0 57517 &il_dbgfs_force_reset_read_57517
110160 +cmm_read_57520 cmm_read 3 57520 NULL
110161 +inode_permission_57531 inode_permission 0 57531 NULL
110162 +acpi_dev_get_resources_57534 acpi_dev_get_resources 0 57534 NULL
110163 +ptlrpc_lprocfs_hp_ratio_seq_write_57537 ptlrpc_lprocfs_hp_ratio_seq_write 3 57537 NULL
110164 +ReadHDLCPnP_57559 ReadHDLCPnP 0 57559 NULL nohasharray
110165 +ext4_group_first_block_no_57559 ext4_group_first_block_no 0-2 57559 &ReadHDLCPnP_57559
110166 +obd_unpackmd_57563 obd_unpackmd 0 57563 NULL
110167 +snd_pcm_playback_ioctl1_57569 snd_pcm_playback_ioctl1 0 57569 NULL
110168 +get_bridge_ifindices_57579 get_bridge_ifindices 0 57579 NULL
110169 +ldlm_cli_enqueue_local_57582 ldlm_cli_enqueue_local 11 57582 NULL
110170 +il_dbgfs_interrupt_write_57591 il_dbgfs_interrupt_write 3 57591 NULL
110171 +read_file_spectral_fft_period_57593 read_file_spectral_fft_period 3 57593 NULL
110172 +wm831x_gpio_to_irq_57614 wm831x_gpio_to_irq 2 57614 NULL
110173 +vma_kernel_pagesize_57616 vma_kernel_pagesize 0 57616 NULL
110174 +tx_tx_retry_template_read_57623 tx_tx_retry_template_read 3 57623 NULL
110175 +sisusbcon_putcs_57630 sisusbcon_putcs 3 57630 NULL
110176 +mem_read_57631 mem_read 3 57631 NULL
110177 +tc3589x_irq_map_57639 tc3589x_irq_map 2 57639 NULL
110178 +r3964_write_57662 r3964_write 4 57662 NULL
110179 +proc_ns_readlink_57664 proc_ns_readlink 3 57664 NULL
110180 +__lgwrite_57669 __lgwrite 4 57669 NULL
110181 +f1x_match_to_this_node_57695 f1x_match_to_this_node 3 57695 NULL
110182 +i2400m_rx_stats_read_57706 i2400m_rx_stats_read 3 57706 NULL
110183 +snd_interval_value_57713 snd_interval_value 0 57713 NULL
110184 +ieee80211_if_read_dot11MeshHWMPconfirmationInterval_57722 ieee80211_if_read_dot11MeshHWMPconfirmationInterval 3 57722 NULL
110185 +i915_gem_object_get_pages_57734 i915_gem_object_get_pages 0 57734 NULL
110186 +nouveau_gpio_create__57735 nouveau_gpio_create_ 4-5 57735 NULL
110187 +compat_sys_set_mempolicy_57742 compat_sys_set_mempolicy 3 57742 NULL nohasharray
110188 +pppol2tp_recvmsg_57742 pppol2tp_recvmsg 4 57742 &compat_sys_set_mempolicy_57742
110189 +ieee80211_if_fmt_dot11MeshHWMPpreqMinInterval_57762 ieee80211_if_fmt_dot11MeshHWMPpreqMinInterval 3 57762 NULL
110190 +SYSC_process_vm_writev_57776 SYSC_process_vm_writev 5-3 57776 NULL
110191 +apei_exec_collect_resources_57788 apei_exec_collect_resources 0 57788 NULL
110192 +ld2_57794 ld2 0 57794 NULL
110193 +ivtv_read_57796 ivtv_read 3 57796 NULL
110194 +generic_ptrace_peekdata_57806 generic_ptrace_peekdata 2 57806 NULL
110195 +ipath_user_sdma_num_pages_57813 ipath_user_sdma_num_pages 0 57813 NULL
110196 +bfad_debugfs_read_regrd_57830 bfad_debugfs_read_regrd 3 57830 NULL
110197 +copy_to_user_57835 copy_to_user 3-0 57835 NULL
110198 +flash_read_57843 flash_read 3 57843 NULL
110199 +kiblnd_create_tx_pool_57846 kiblnd_create_tx_pool 2 57846 NULL
110200 +process_all_new_xattrs_57881 process_all_new_xattrs 0 57881 NULL
110201 +xt_alloc_table_info_57903 xt_alloc_table_info 1 57903 NULL
110202 +iio_read_first_n_kfifo_57910 iio_read_first_n_kfifo 2 57910 NULL nohasharray
110203 +atomic_add_return_unchecked_57910 atomic_add_return_unchecked 0-1 57910 &iio_read_first_n_kfifo_57910
110204 +memcg_caches_array_size_57918 memcg_caches_array_size 0-1 57918 NULL
110205 +twl_i2c_write_57923 twl_i2c_write 3-4 57923 NULL
110206 +__snd_gf1_look16_57925 __snd_gf1_look16 0 57925 NULL
110207 +sel_read_handle_unknown_57933 sel_read_handle_unknown 3 57933 NULL
110208 +nvc0_vm_create_57942 nvc0_vm_create 2-3 57942 NULL
110209 +xfs_mru_cache_create_57943 xfs_mru_cache_create 3 57943 NULL
110210 +key_algorithm_read_57946 key_algorithm_read 3 57946 NULL
110211 +ip_set_alloc_57953 ip_set_alloc 1 57953 NULL nohasharray
110212 +ioat3_dca_count_dca_slots_57953 ioat3_dca_count_dca_slots 0 57953 &ip_set_alloc_57953
110213 +c2_reg_user_mr_57982 c2_reg_user_mr 2-3 57982 NULL
110214 +interleave_nodes_57992 interleave_nodes 0 57992 NULL
110215 +do_rx_dma_57996 do_rx_dma 5 57996 NULL
110216 +rx_reset_counter_read_58001 rx_reset_counter_read 3 58001 NULL
110217 +iwl_dbgfs_ucode_rx_stats_read_58023 iwl_dbgfs_ucode_rx_stats_read 3 58023 NULL
110218 +io_playback_transfer_58030 io_playback_transfer 4 58030 NULL
110219 +ext4_block_bitmap_58033 ext4_block_bitmap 0 58033 NULL
110220 +mce_async_out_58056 mce_async_out 3 58056 NULL
110221 +ocfs2_find_leaf_58065 ocfs2_find_leaf 0 58065 NULL
110222 +dt3155_alloc_coherent_58073 dt3155_alloc_coherent 2 58073 NULL
110223 +cm4040_write_58079 cm4040_write 3 58079 NULL
110224 +ipv6_flowlabel_opt_58135 ipv6_flowlabel_opt 3 58135 NULL nohasharray
110225 +slhc_init_58135 slhc_init 2-1 58135 &ipv6_flowlabel_opt_58135
110226 +ocfs2_reserve_clusters_58164 ocfs2_reserve_clusters 0 58164 NULL
110227 +ext4_bg_num_gdb_58171 ext4_bg_num_gdb 0 58171 NULL
110228 +garmin_write_bulk_58191 garmin_write_bulk 3 58191 NULL
110229 +ieee80211_if_fmt_flags_58205 ieee80211_if_fmt_flags 3 58205 NULL
110230 +sysfs_add_file_mode_58222 sysfs_add_file_mode 0 58222 NULL
110231 +hva_to_pfn_58241 hva_to_pfn 1-0 58241 NULL
110232 +btrfsic_create_link_to_next_block_58246 btrfsic_create_link_to_next_block 4 58246 NULL
110233 +read_file_debug_58256 read_file_debug 3 58256 NULL
110234 +osc_max_dirty_mb_seq_write_58263 osc_max_dirty_mb_seq_write 3 58263 NULL
110235 +cfg80211_mgmt_tx_status_58266 cfg80211_mgmt_tx_status 4 58266 NULL
110236 +profile_load_58267 profile_load 3 58267 NULL
110237 +kstrtos8_from_user_58268 kstrtos8_from_user 2 58268 NULL
110238 +acpi_ds_build_internal_package_obj_58271 acpi_ds_build_internal_package_obj 3 58271 NULL
110239 +iscsi_decode_text_input_58292 iscsi_decode_text_input 4 58292 NULL
110240 +intel_alloc_coherent_58302 intel_alloc_coherent 2 58302 NULL
110241 +ieee80211_if_read_dot11MeshTTL_58307 ieee80211_if_read_dot11MeshTTL 3 58307 NULL
110242 +i915_wait_seqno_58309 i915_wait_seqno 0 58309 NULL
110243 +tx_tx_start_int_templates_read_58324 tx_tx_start_int_templates_read 3 58324 NULL
110244 +ext4_ext_truncate_extend_restart_58331 ext4_ext_truncate_extend_restart 3 58331 NULL
110245 +diva_init_dma_map_58336 diva_init_dma_map 3 58336 NULL
110246 +__copy_from_user_swizzled_58337 __copy_from_user_swizzled 2-4 58337 NULL
110247 +next_pidmap_58347 next_pidmap 2-0 58347 NULL
110248 +SyS_migrate_pages_58348 SyS_migrate_pages 2 58348 NULL
110249 +save_hint_58359 save_hint 2 58359 NULL
110250 +vmw_ttm_tt_create_58369 vmw_ttm_tt_create 2 58369 NULL nohasharray
110251 +brcmf_debugfs_sdio_counter_read_58369 brcmf_debugfs_sdio_counter_read 3 58369 &vmw_ttm_tt_create_58369
110252 +hash_ipportnet6_expire_58379 hash_ipportnet6_expire 3 58379 NULL
110253 +il_dbgfs_status_read_58388 il_dbgfs_status_read 3 58388 NULL
110254 +_drbd_md_sync_page_io_58403 _drbd_md_sync_page_io 6 58403 NULL
110255 +kvm_mmu_write_protect_pt_masked_58406 kvm_mmu_write_protect_pt_masked 3-4 58406 NULL nohasharray
110256 +idetape_pad_zeros_58406 idetape_pad_zeros 2 58406 &kvm_mmu_write_protect_pt_masked_58406
110257 +i2400m_pld_size_58415 i2400m_pld_size 0 58415 NULL
110258 +__mlx4_alloc_mtt_range_58418 __mlx4_alloc_mtt_range 2 58418 NULL
110259 +capabilities_read_58457 capabilities_read 3 58457 NULL
110260 +batadv_iv_ogm_aggr_packet_58462 batadv_iv_ogm_aggr_packet 3 58462 NULL
110261 +lpfc_idiag_baracc_read_58466 lpfc_idiag_baracc_read 3 58466 NULL nohasharray
110262 +compat_do_ipt_set_ctl_58466 compat_do_ipt_set_ctl 4 58466 &lpfc_idiag_baracc_read_58466
110263 +nv_rd08_58472 nv_rd08 0 58472 NULL
110264 +snd_gf1_read_addr_58483 snd_gf1_read_addr 0 58483 NULL
110265 +snd_rme96_capture_copy_58484 snd_rme96_capture_copy 5 58484 NULL
110266 +btrfs_cont_expand_58498 btrfs_cont_expand 2-3 58498 NULL
110267 +rndis_add_response_58544 rndis_add_response 2 58544 NULL
110268 +__clear_discard_58546 __clear_discard 2 58546 NULL
110269 +wrap_max_58548 wrap_max 0-1-2 58548 NULL
110270 +wep_decrypt_fail_read_58567 wep_decrypt_fail_read 3 58567 NULL
110271 +scnprint_mac_oui_58578 scnprint_mac_oui 3-0 58578 NULL
110272 +get_rhf_errstring_58582 get_rhf_errstring 3 58582 NULL
110273 +ea_read_inline_58589 ea_read_inline 0 58589 NULL
110274 +isku_sysfs_read_keys_thumbster_58590 isku_sysfs_read_keys_thumbster 6 58590 NULL
110275 +xip_file_read_58592 xip_file_read 3 58592 NULL
110276 +ecryptfs_write_end_58594 ecryptfs_write_end 5-3 58594 NULL
110277 +radeon_bo_size_58606 radeon_bo_size 0 58606 NULL
110278 +ebt_buf_count_58607 ebt_buf_count 0 58607 NULL
110279 +xfs_iomap_write_delay_58616 xfs_iomap_write_delay 2 58616 NULL
110280 +skb_copy_to_page_nocache_58624 skb_copy_to_page_nocache 6 58624 NULL
110281 +filemap_fdatawrite_range_58630 filemap_fdatawrite_range 0 58630 NULL
110282 +vb2_qbuf_58631 vb2_qbuf 0 58631 NULL
110283 +module_alloc_update_bounds_rx_58634 module_alloc_update_bounds_rx 1 58634 NULL
110284 +tx_tx_start_fw_gen_read_58648 tx_tx_start_fw_gen_read 3 58648 NULL
110285 +ocfs2_block_to_cluster_start_58653 ocfs2_block_to_cluster_start 2 58653 NULL
110286 +iwl_dbgfs_rx_handlers_write_58655 iwl_dbgfs_rx_handlers_write 3 58655 NULL
110287 +__gfn_to_pfn_58671 __gfn_to_pfn 2 58671 NULL
110288 +find_zero_58685 find_zero 0-1 58685 NULL
110289 +uwb_bce_print_IEs_58686 uwb_bce_print_IEs 4 58686 NULL
110290 +tps6586x_writes_58689 tps6586x_writes 2-3 58689 NULL
110291 +vmalloc_node_58700 vmalloc_node 1-2 58700 NULL
110292 +vx_send_msg_58711 vx_send_msg 0 58711 NULL
110293 +i915_gem_execbuffer_reserve_58722 i915_gem_execbuffer_reserve 0 58722 NULL
110294 +da9052_gpio_to_irq_58729 da9052_gpio_to_irq 2 58729 NULL
110295 +csum_exist_in_range_58730 csum_exist_in_range 2-3 58730 NULL
110296 +frames_to_bytes_58741 frames_to_bytes 0-2 58741 NULL
110297 +ieee80211_if_write_tkip_mic_test_58748 ieee80211_if_write_tkip_mic_test 3 58748 NULL
110298 +agp_allocate_memory_58761 agp_allocate_memory 2 58761 NULL
110299 +oblock_to_dblock_58762 oblock_to_dblock 0-2 58762 NULL
110300 +regmap_calc_reg_len_58795 regmap_calc_reg_len 0 58795 NULL
110301 +raw_send_hdrinc_58803 raw_send_hdrinc 4 58803 NULL
110302 +isku_sysfs_read_58806 isku_sysfs_read 5 58806 NULL
110303 +write_file_58812 write_file 4 58812 NULL
110304 +ep_read_58813 ep_read 3 58813 NULL
110305 +command_write_58841 command_write 3 58841 NULL
110306 +ocfs2_truncate_log_append_58850 ocfs2_truncate_log_append 3-0 58850 NULL
110307 +ath6kl_wmi_send_action_cmd_58860 ath6kl_wmi_send_action_cmd 7 58860 NULL
110308 +gs_alloc_req_58883 gs_alloc_req 2 58883 NULL
110309 +esas2r_change_queue_depth_58886 esas2r_change_queue_depth 2 58886 NULL
110310 +lprocfs_wr_pinger_recov_58914 lprocfs_wr_pinger_recov 3 58914 NULL
110311 +print_devstats_dot11FCSErrorCount_58919 print_devstats_dot11FCSErrorCount 3 58919 NULL
110312 +pipeline_cs_rx_packet_out_read_58926 pipeline_cs_rx_packet_out_read 3 58926 NULL
110313 +SyS_pread64_58935 SyS_pread64 3 58935 NULL
110314 +wait_table_hash_nr_entries_58962 wait_table_hash_nr_entries 0 58962 NULL
110315 +ieee80211_if_fmt_dot11MeshHWMPactivePathToRootTimeout_58965 ieee80211_if_fmt_dot11MeshHWMPactivePathToRootTimeout 3 58965 NULL
110316 +crypto_aead_ivsize_58970 crypto_aead_ivsize 0 58970 NULL
110317 +__mem_cgroup_try_charge_58976 __mem_cgroup_try_charge 0 58976 NULL
110318 +init_list_58990 init_list 3 58990 NULL
110319 +remap_to_cache_dirty_58991 remap_to_cache_dirty 4-3 58991 NULL
110320 +ep_write_59008 ep_write 3 59008 NULL
110321 +lpfc_idiag_baracc_write_59014 lpfc_idiag_baracc_write 3 59014 NULL
110322 +SyS_preadv_59029 SyS_preadv 3 59029 NULL
110323 +init_pci_cap_msi_perm_59033 init_pci_cap_msi_perm 2 59033 NULL
110324 +selinux_transaction_write_59038 selinux_transaction_write 3 59038 NULL
110325 +crypto_aead_reqsize_59039 crypto_aead_reqsize 0 59039 NULL
110326 +regmap_bulk_write_59049 regmap_bulk_write 2-4 59049 NULL
110327 +sysfs_link_sibling_59078 sysfs_link_sibling 0 59078 NULL
110328 +do_perf_sw_event_59099 do_perf_sw_event 3 59099 NULL
110329 +mmc_sd_num_wr_blocks_59112 mmc_sd_num_wr_blocks 0 59112 NULL
110330 +scsi_io_completion_59122 scsi_io_completion 2 59122 NULL
110331 +init_status_page_59124 init_status_page 0 59124 NULL
110332 +nfc_llcp_send_i_frame_59130 nfc_llcp_send_i_frame 3 59130 NULL
110333 +print_devstats_dot11RTSSuccessCount_59145 print_devstats_dot11RTSSuccessCount 3 59145 NULL nohasharray
110334 +framebuffer_alloc_59145 framebuffer_alloc 1 59145 &print_devstats_dot11RTSSuccessCount_59145
110335 +ocfs2_claim_local_alloc_bits_59147 ocfs2_claim_local_alloc_bits 0 59147 NULL
110336 +radeon_compat_ioctl_59150 radeon_compat_ioctl 2 59150 NULL
110337 +pvr2_hdw_report_clients_59152 pvr2_hdw_report_clients 3 59152 NULL
110338 +ksize_59176 ksize 0 59176 NULL
110339 +setup_window_59178 setup_window 4-2-5-7 59178 NULL
110340 +ocfs2_move_extent_59187 ocfs2_move_extent 3-5-2 59187 NULL
110341 +validate_exec_list_59204 validate_exec_list 0 59204 NULL
110342 +xfs_iext_realloc_indirect_59211 xfs_iext_realloc_indirect 2 59211 NULL
110343 +check_mapped_selector_name_59216 check_mapped_selector_name 5 59216 NULL
110344 +dt3155_read_59226 dt3155_read 3 59226 NULL
110345 +paging64_gpte_to_gfn_lvl_59229 paging64_gpte_to_gfn_lvl 0-1-2 59229 NULL
110346 +tty_prepare_flip_string_flags_59240 tty_prepare_flip_string_flags 4 59240 NULL
110347 +nla_len_59258 nla_len 0 59258 NULL
110348 +drbd_bm_write_page_59290 drbd_bm_write_page 2 59290 NULL
110349 +__push_leaf_right_59302 __push_leaf_right 0 59302 NULL
110350 +btrfs_insert_dir_item_59304 btrfs_insert_dir_item 4-0 59304 NULL
110351 +fd_copyout_59323 fd_copyout 3 59323 NULL
110352 +read_9287_modal_eeprom_59327 read_9287_modal_eeprom 3 59327 NULL
110353 +rx_defrag_in_process_called_read_59338 rx_defrag_in_process_called_read 3 59338 NULL
110354 +paging64_get_level1_sp_gpa_59346 paging64_get_level1_sp_gpa 0 59346 NULL nohasharray
110355 +xfs_attrmulti_attr_set_59346 xfs_attrmulti_attr_set 4 59346 &paging64_get_level1_sp_gpa_59346
110356 +__map_request_59350 __map_request 0 59350 NULL
110357 +xfs_dir2_sf_entsize_59366 xfs_dir2_sf_entsize 0-2 59366 NULL
110358 +blk_flush_policy_59368 blk_flush_policy 0 59368 NULL
110359 +f2fs_fallocate_59377 f2fs_fallocate 3-4 59377 NULL
110360 +pvr2_debugifc_print_info_59380 pvr2_debugifc_print_info 3 59380 NULL
110361 +ocfs2_replay_truncate_records_59382 ocfs2_replay_truncate_records 0 59382 NULL
110362 +journal_init_dev_59384 journal_init_dev 5 59384 NULL
110363 +isku_sysfs_read_keys_function_59412 isku_sysfs_read_keys_function 6 59412 NULL
110364 +pci_ctrl_read_59424 pci_ctrl_read 0 59424 NULL
110365 +vxge_hw_ring_rxds_per_block_get_59425 vxge_hw_ring_rxds_per_block_get 0 59425 NULL
110366 +snd_pcm_tstamp_59431 snd_pcm_tstamp 0 59431 NULL
110367 +SyS_sched_setaffinity_59442 SyS_sched_setaffinity 2 59442 NULL
110368 +fs_path_ensure_buf_59445 fs_path_ensure_buf 2-0 59445 NULL
110369 +descriptor_loc_59446 descriptor_loc 3 59446 NULL
110370 +block_rsv_use_bytes_59464 block_rsv_use_bytes 0 59464 NULL
110371 +shrink_tnc_trees_59481 shrink_tnc_trees 0 59481 NULL
110372 +btrfs_del_dir_entries_in_log_59490 btrfs_del_dir_entries_in_log 0 59490 NULL
110373 +ib_copy_from_udata_59502 ib_copy_from_udata 3 59502 NULL
110374 +rds_pin_pages_59507 rds_pin_pages 1-2 59507 NULL
110375 +ext4_resize_fs_59543 ext4_resize_fs 2 59543 NULL
110376 +mpi_get_nbits_59551 mpi_get_nbits 0 59551 NULL
110377 +tunables_write_59563 tunables_write 3 59563 NULL
110378 +__copy_from_user_ll_nozero_59571 __copy_from_user_ll_nozero 0-3 59571 NULL
110379 +write_pbl_59583 write_pbl 4 59583 NULL
110380 +memdup_user_59590 memdup_user 2 59590 NULL
110381 +tps6586x_irq_get_virq_59601 tps6586x_irq_get_virq 2 59601 NULL
110382 +mem_fwlog_free_mem_blks_read_59616 mem_fwlog_free_mem_blks_read 3 59616 NULL nohasharray
110383 +xrcdn_free_res_59616 xrcdn_free_res 5 59616 &mem_fwlog_free_mem_blks_read_59616
110384 +ath6kl_endpoint_stats_write_59621 ath6kl_endpoint_stats_write 3 59621 NULL
110385 +mtrr_write_59622 mtrr_write 3 59622 NULL
110386 +ocfs2_adjust_rightmost_branch_59623 ocfs2_adjust_rightmost_branch 0 59623 NULL
110387 +find_first_zero_bit_59636 find_first_zero_bit 0-2 59636 NULL
110388 +SyS_setdomainname_59646 SyS_setdomainname 2 59646 NULL
110389 +hidraw_read_59650 hidraw_read 3 59650 NULL
110390 +v9fs_xattr_set_acl_59651 v9fs_xattr_set_acl 4 59651 NULL
110391 +paravirt_sched_clock_59660 paravirt_sched_clock 0 59660 NULL
110392 +__devcgroup_check_permission_59665 __devcgroup_check_permission 0 59665 NULL
110393 +iwl_dbgfs_mac_params_read_59666 iwl_dbgfs_mac_params_read 3 59666 NULL
110394 +alloc_dca_provider_59670 alloc_dca_provider 2 59670 NULL
110395 +mic_calc_failure_read_59700 mic_calc_failure_read 3 59700 NULL
110396 +ioperm_get_59701 ioperm_get 4-3 59701 NULL
110397 +snd_pcm_info_user_59711 snd_pcm_info_user 0 59711 NULL
110398 +prism2_info_scanresults_59729 prism2_info_scanresults 3 59729 NULL
110399 +ieee80211_if_read_fwded_unicast_59740 ieee80211_if_read_fwded_unicast 3 59740 NULL
110400 +fat_direct_IO_59741 fat_direct_IO 4 59741 NULL
110401 +qib_decode_7220_sdma_errs_59745 qib_decode_7220_sdma_errs 4 59745 NULL
110402 +strnlen_59746 strnlen 0 59746 NULL
110403 +process_all_refs_59754 process_all_refs 0 59754 NULL nohasharray
110404 +ext3_acl_count_59754 ext3_acl_count 0-1 59754 &process_all_refs_59754
110405 +process_new_xattr_59755 process_new_xattr 0 59755 NULL
110406 +long_retry_limit_read_59766 long_retry_limit_read 3 59766 NULL
110407 +venus_remove_59781 venus_remove 4 59781 NULL
110408 +mei_nfc_recv_59784 mei_nfc_recv 3 59784 NULL
110409 +xlog_do_recover_59789 xlog_do_recover 3 59789 NULL
110410 +msb_get_zone_from_lba_59800 msb_get_zone_from_lba 0-1 59800 NULL
110411 +ipw_write_59807 ipw_write 3 59807 NULL
110412 +scsi_init_shared_tag_map_59812 scsi_init_shared_tag_map 2 59812 NULL
110413 +ieee80211_if_read_dot11MeshHWMPmaxPREQretries_59829 ieee80211_if_read_dot11MeshHWMPmaxPREQretries 3 59829 NULL
110414 +gspca_dev_probe2_59833 gspca_dev_probe2 4 59833 NULL
110415 +fs64_to_cpu_59845 fs64_to_cpu 0 59845 NULL
110416 +regmap_raw_write_async_59849 regmap_raw_write_async 2-4 59849 NULL
110417 +intel_ring_wait_request_59865 intel_ring_wait_request 0 59865 NULL
110418 +pvr2_ioread_set_sync_key_59882 pvr2_ioread_set_sync_key 3 59882 NULL
110419 +shmem_zero_setup_59885 shmem_zero_setup 0 59885 NULL nohasharray
110420 +start_transaction_59885 start_transaction 2 59885 &shmem_zero_setup_59885
110421 +l2cap_sock_recvmsg_59886 l2cap_sock_recvmsg 4 59886 NULL
110422 +ffs_prepare_buffer_59892 ffs_prepare_buffer 2 59892 NULL
110423 +ocfs2_extend_rotate_transaction_59894 ocfs2_extend_rotate_transaction 0 59894 NULL
110424 +swiotlb_map_page_59909 swiotlb_map_page 3 59909 NULL
110425 +aic7xxx_abort_waiting_scb_59932 aic7xxx_abort_waiting_scb 0 59932 NULL
110426 +kvm_mmu_notifier_invalidate_range_start_59944 kvm_mmu_notifier_invalidate_range_start 3-4 59944 NULL
110427 +ocfs2_expand_inline_ref_root_59945 ocfs2_expand_inline_ref_root 0 59945 NULL
110428 +il_dbgfs_rxon_flags_read_59950 il_dbgfs_rxon_flags_read 3 59950 NULL nohasharray
110429 +dapm_widget_power_read_file_59950 dapm_widget_power_read_file 3 59950 &il_dbgfs_rxon_flags_read_59950
110430 +lookup_node_59953 lookup_node 2 59953 NULL
110431 +il_dbgfs_missed_beacon_read_59956 il_dbgfs_missed_beacon_read 3 59956 NULL
110432 +kvm_set_cr3_59965 kvm_set_cr3 2 59965 NULL
110433 +__arch_hweight16_59975 __arch_hweight16 0 59975 NULL
110434 +osd_req_read_kern_59990 osd_req_read_kern 5 59990 NULL
110435 +ghash_async_setkey_60001 ghash_async_setkey 3 60001 NULL
110436 +ieee80211_if_fmt_dot11MeshAwakeWindowDuration_60006 ieee80211_if_fmt_dot11MeshAwakeWindowDuration 3 60006 NULL
110437 +rawsock_sendmsg_60010 rawsock_sendmsg 4 60010 NULL
110438 +mthca_init_cq_60011 mthca_init_cq 2 60011 NULL
110439 +osd_req_list_dev_partitions_60027 osd_req_list_dev_partitions 4 60027 NULL
110440 +xlog_bread_offset_60030 xlog_bread_offset 3 60030 NULL
110441 +bio_integrity_hw_sectors_60039 bio_integrity_hw_sectors 0-2 60039 NULL
110442 +do_ip6t_set_ctl_60040 do_ip6t_set_ctl 4 60040 NULL
110443 +vcs_size_60050 vcs_size 0 60050 NULL nohasharray
110444 +pin_2_irq_60050 pin_2_irq 0-3 60050 &vcs_size_60050
110445 +gru_alloc_gts_60056 gru_alloc_gts 3-2 60056 NULL
110446 +open_cur_inode_file_60057 open_cur_inode_file 0 60057 NULL
110447 +compat_writev_60063 compat_writev 3 60063 NULL
110448 +ath6kl_listen_int_write_60066 ath6kl_listen_int_write 3 60066 NULL
110449 +c4iw_num_stags_60073 c4iw_num_stags 0 60073 NULL
110450 +mp_register_gsi_60079 mp_register_gsi 2 60079 NULL
110451 +rxrpc_kernel_send_data_60083 rxrpc_kernel_send_data 3 60083 NULL
110452 +ieee80211_if_fmt_fwded_frames_60103 ieee80211_if_fmt_fwded_frames 3 60103 NULL
110453 +SYSC_msgsnd_60113 SYSC_msgsnd 3 60113 NULL
110454 +nfs_idmap_request_key_60124 nfs_idmap_request_key 2 60124 NULL
110455 +__mutex_lock_common_60134 __mutex_lock_common 0 60134 NULL
110456 +ld_usb_read_60156 ld_usb_read 3 60156 NULL
110457 +jmb38x_ms_count_slots_60164 jmb38x_ms_count_slots 0 60164 NULL
110458 +init_state_60165 init_state 2 60165 NULL
110459 +sg_build_sgat_60179 sg_build_sgat 3 60179 NULL nohasharray
110460 +jffs2_alloc_full_dirent_60179 jffs2_alloc_full_dirent 1 60179 &sg_build_sgat_60179
110461 +fuse_async_req_send_60183 fuse_async_req_send 0-3 60183 NULL
110462 +rx_rx_tkip_replays_read_60193 rx_rx_tkip_replays_read 3 60193 NULL
110463 +qib_reg_phys_mr_60202 qib_reg_phys_mr 3 60202 NULL
110464 +btrfs_get_token_16_60220 btrfs_get_token_16 0 60220 NULL
110465 +arizona_map_irq_60230 arizona_map_irq 2 60230 NULL nohasharray
110466 +__phys_addr_nodebug_60230 __phys_addr_nodebug 0 60230 &arizona_map_irq_60230
110467 +wm831x_irq_60254 wm831x_irq 2 60254 NULL
110468 +irq_alloc_domain_generic_chips_60264 irq_alloc_domain_generic_chips 3-2 60264 NULL
110469 +printer_write_60276 printer_write 3 60276 NULL
110470 +alloc_irq_pin_list_60277 alloc_irq_pin_list 1 60277 NULL
110471 +do_xip_mapping_read_60297 do_xip_mapping_read 5 60297 NULL
110472 +getDataLength_60301 getDataLength 0 60301 NULL
110473 +xfs_next_bit_60322 xfs_next_bit 0-3 60322 NULL
110474 +inorder_to_tree_60331 inorder_to_tree 1-0 60331 NULL
110475 +usb_alphatrack_write_60341 usb_alphatrack_write 3 60341 NULL
110476 +__kfifo_from_user_r_60345 __kfifo_from_user_r 5-3 60345 NULL
110477 +max_unfragmented_pages_60362 max_unfragmented_pages 0 60362 NULL
110478 +dccp_setsockopt_60367 dccp_setsockopt 5 60367 NULL
110479 +ubi_eba_atomic_leb_change_60379 ubi_eba_atomic_leb_change 0-5 60379 NULL
110480 +instruction_pointer_60384 instruction_pointer 0 60384 NULL
110481 +drop_outstanding_extent_60390 drop_outstanding_extent 0 60390 NULL
110482 +mthca_alloc_resize_buf_60394 mthca_alloc_resize_buf 3 60394 NULL
110483 +ocfs2_zero_extend_60396 ocfs2_zero_extend 3 60396 NULL
110484 +driver_names_read_60399 driver_names_read 3 60399 NULL
110485 +paging32_walk_addr_generic_60415 paging32_walk_addr_generic 4 60415 NULL
110486 +simple_alloc_urb_60420 simple_alloc_urb 3 60420 NULL
110487 +excessive_retries_read_60425 excessive_retries_read 3 60425 NULL
110488 +tstats_write_60432 tstats_write 3 60432 NULL nohasharray
110489 +kmalloc_60432 kmalloc 1 60432 &tstats_write_60432
110490 +scaled_div32_60442 scaled_div32 2-1-0 60442 NULL
110491 +snd_hda_get_num_raw_conns_60462 snd_hda_get_num_raw_conns 0 60462 NULL
110492 +crypto_shash_setkey_60483 crypto_shash_setkey 3 60483 NULL
110493 +lustre_msg_early_size_60496 lustre_msg_early_size 0 60496 NULL
110494 +v9fs_fid_readn_60544 v9fs_fid_readn 4 60544 NULL
110495 +nonpaging_map_60551 nonpaging_map 4 60551 NULL
110496 +osc_lockless_truncate_seq_write_60553 osc_lockless_truncate_seq_write 3 60553 NULL
110497 +tracing_entries_write_60563 tracing_entries_write 3 60563 NULL
110498 +hash_net6_expire_60598 hash_net6_expire 3 60598 NULL
110499 +btrfs_add_delayed_tree_ref_60602 btrfs_add_delayed_tree_ref 0 60602 NULL
110500 +skb_transport_offset_60619 skb_transport_offset 0 60619 NULL
110501 +wl1273_fm_fops_write_60621 wl1273_fm_fops_write 3 60621 NULL
110502 +usb_control_msg_60624 usb_control_msg 0 60624 NULL
110503 +acl_alloc_stack_init_60630 acl_alloc_stack_init 1 60630 NULL
110504 +free_dind_blocks_60635 free_dind_blocks 0 60635 NULL
110505 +ubifs_recover_leb_60639 ubifs_recover_leb 3 60639 NULL
110506 +__proc_lnet_stats_60647 __proc_lnet_stats 5 60647 NULL
110507 +mv_ffc64_60648 mv_ffc64 1 60648 NULL
110508 +if_sdio_host_to_card_60666 if_sdio_host_to_card 4 60666 NULL
110509 +run_clustered_refs_60668 run_clustered_refs 0 60668 NULL
110510 +ieee80211_if_read_dot11MeshConfirmTimeout_60670 ieee80211_if_read_dot11MeshConfirmTimeout 3 60670 NULL
110511 +btrfs_reserve_extent_60674 btrfs_reserve_extent 5-0 60674 NULL
110512 +init_data_container_60709 init_data_container 1 60709 NULL
110513 +vga_rcrt_60731 vga_rcrt 0 60731 NULL
110514 +snd_ice1712_ds_read_60754 snd_ice1712_ds_read 0 60754 NULL
110515 +raid_status_60755 raid_status 5 60755 NULL
110516 +sel_write_checkreqprot_60774 sel_write_checkreqprot 3 60774 NULL
110517 +opticon_write_60775 opticon_write 4 60775 NULL
110518 +acl_alloc_num_60778 acl_alloc_num 1-2 60778 NULL
110519 +snd_pcm_oss_readv3_60792 snd_pcm_oss_readv3 3 60792 NULL
110520 +block_rsv_migrate_bytes_60843 block_rsv_migrate_bytes 0 60843 NULL
110521 +pwr_tx_with_ps_read_60851 pwr_tx_with_ps_read 3 60851 NULL
110522 +alloc_buf_60864 alloc_buf 2-3 60864 NULL
110523 +generic_writepages_60871 generic_writepages 0 60871 NULL
110524 +tipc_createport_60875 tipc_createport 4 60875 NULL
110525 +ubifs_read_one_lp_60882 ubifs_read_one_lp 0 60882 NULL
110526 +ext4_update_inline_data_60888 ext4_update_inline_data 3 60888 NULL
110527 +iio_debugfs_read_reg_60908 iio_debugfs_read_reg 3 60908 NULL
110528 +libcfs_sock_ioctl_60915 libcfs_sock_ioctl 0 60915 NULL
110529 +mgt_set_varlen_60916 mgt_set_varlen 4 60916 NULL
110530 +scrub_chunk_60926 scrub_chunk 5 60926 NULL
110531 +submit_extent_page_60928 submit_extent_page 5 60928 NULL
110532 +pti_char_write_60960 pti_char_write 3 60960 NULL
110533 +mwifiex_alloc_sdio_mpa_buffers_60961 mwifiex_alloc_sdio_mpa_buffers 3-2 60961 NULL
110534 +__a2mp_build_60987 __a2mp_build 3 60987 NULL
110535 +hsc_msg_alloc_60990 hsc_msg_alloc 1 60990 NULL
110536 +ath6kl_lrssi_roam_read_61022 ath6kl_lrssi_roam_read 3 61022 NULL
110537 +graph_depth_write_61024 graph_depth_write 3 61024 NULL
110538 +sdhci_pltfm_register_61031 sdhci_pltfm_register 3 61031 NULL
110539 +lpfc_idiag_queacc_write_61043 lpfc_idiag_queacc_write 3 61043 NULL
110540 +symtab_init_61050 symtab_init 2 61050 NULL
110541 +fuse_send_write_61053 fuse_send_write 0-4 61053 NULL
110542 +snd_pcm_pause_61054 snd_pcm_pause 0 61054 NULL
110543 +bitmap_scnlistprintf_61062 bitmap_scnlistprintf 0-4-2 61062 NULL
110544 +ahash_align_buffer_size_61070 ahash_align_buffer_size 0-1-2 61070 NULL
110545 +journal_stop_61080 journal_stop 0 61080 NULL
110546 +snd_pcm_update_hw_ptr0_61084 snd_pcm_update_hw_ptr0 0 61084 NULL
110547 +get_derived_key_61100 get_derived_key 4 61100 NULL
110548 +mem_cgroup_cache_charge_61101 mem_cgroup_cache_charge 0 61101 NULL
110549 +i40e_calculate_l2fpm_size_61104 i40e_calculate_l2fpm_size 0-1-2-3-4 61104 NULL
110550 +alloc_chrdev_region_61112 alloc_chrdev_region 0 61112 NULL
110551 +__probe_kernel_read_61119 __probe_kernel_read 3 61119 NULL
110552 +vmemmap_alloc_block_buf_61126 vmemmap_alloc_block_buf 1 61126 NULL
110553 +afs_proc_cells_write_61139 afs_proc_cells_write 3 61139 NULL
110554 +brcmf_sdio_chip_cr4_exitdl_61143 brcmf_sdio_chip_cr4_exitdl 4 61143 NULL
110555 +__vmalloc_61168 __vmalloc 1 61168 NULL
110556 +event_oom_late_read_61175 event_oom_late_read 3 61175 NULL nohasharray
110557 +pair_device_61175 pair_device 4 61175 &event_oom_late_read_61175
110558 +dio_bio_add_page_61178 dio_bio_add_page 0 61178 NULL
110559 +btrfs_reloc_post_snapshot_61189 btrfs_reloc_post_snapshot 0 61189 NULL
110560 +SyS_prctl_61202 SyS_prctl 4 61202 NULL
110561 +arch_hibernation_header_save_61212 arch_hibernation_header_save 0 61212 NULL
110562 +smk_read_ambient_61220 smk_read_ambient 3 61220 NULL
110563 +__verify_planes_array_61249 __verify_planes_array 0 61249 NULL
110564 +find_get_pages_tag_61270 find_get_pages_tag 0 61270 NULL nohasharray
110565 +ifalias_store_61270 ifalias_store 4 61270 &find_get_pages_tag_61270 nohasharray
110566 +btrfs_bio_alloc_61270 btrfs_bio_alloc 3 61270 &ifalias_store_61270
110567 +kick_a_thread_61273 kick_a_thread 0 61273 NULL
110568 +hfsplus_getxattr_finder_info_61283 hfsplus_getxattr_finder_info 0 61283 NULL nohasharray
110569 +vortex_adbdma_getlinearpos_61283 vortex_adbdma_getlinearpos 0 61283 &hfsplus_getxattr_finder_info_61283
110570 +nvme_trans_copy_to_user_61288 nvme_trans_copy_to_user 3 61288 NULL
110571 +ext4_issue_discard_61305 ext4_issue_discard 2 61305 NULL
110572 +xfer_from_user_61307 xfer_from_user 3 61307 NULL
110573 +ocfs2_get_sector_61309 ocfs2_get_sector 4 61309 NULL
110574 +timespec_to_ns_61317 timespec_to_ns 0 61317 NULL
110575 +xfrm_user_sec_ctx_size_61320 xfrm_user_sec_ctx_size 0 61320 NULL
110576 +C_SYSC_msgsnd_61330 C_SYSC_msgsnd 3 61330 NULL
110577 +write_file_spectral_short_repeat_61335 write_file_spectral_short_repeat 3 61335 NULL
110578 +__fls_61340 __fls 0 61340 NULL nohasharray
110579 +st5481_setup_isocpipes_61340 st5481_setup_isocpipes 6-4 61340 &__fls_61340
110580 +do_splice_direct_61341 do_splice_direct 5 61341 NULL nohasharray
110581 +rx_rx_wa_ba_not_expected_read_61341 rx_rx_wa_ba_not_expected_read 3 61341 &do_splice_direct_61341
110582 +__dm_get_reserved_ios_61342 __dm_get_reserved_ios 0-2-3 61342 NULL
110583 +f1x_map_sysaddr_to_csrow_61344 f1x_map_sysaddr_to_csrow 2 61344 NULL
110584 +kvm_apic_id_61363 kvm_apic_id 0 61363 NULL
110585 +debug_debug4_read_61367 debug_debug4_read 3 61367 NULL
110586 +get_inode_info_61387 get_inode_info 0 61387 NULL
110587 +system_enable_write_61396 system_enable_write 3 61396 NULL
110588 +size_entry_mwt_61400 size_entry_mwt 0 61400 NULL
110589 +xfs_zero_remaining_bytes_61423 xfs_zero_remaining_bytes 3 61423 NULL
110590 +i915_emit_box_61436 i915_emit_box 0 61436 NULL
110591 +dma_ops_area_alloc_61440 dma_ops_area_alloc 3-4-5-0 61440 NULL
110592 +tc3589x_irq_unmap_61447 tc3589x_irq_unmap 2 61447 NULL
110593 +unix_stream_sendmsg_61455 unix_stream_sendmsg 4 61455 NULL
110594 +snd_pcm_lib_writev_transfer_61483 snd_pcm_lib_writev_transfer 5-4-2 61483 NULL
110595 +btrfs_item_size_61485 btrfs_item_size 0 61485 NULL
110596 +ocfs2_get_refcount_rec_61514 ocfs2_get_refcount_rec 0 61514 NULL
110597 +__vmalloc_area_node_61525 __vmalloc_area_node 4 61525 NULL
110598 +erst_errno_61526 erst_errno 0 61526 NULL
110599 +ntfs_attr_lookup_61539 ntfs_attr_lookup 0 61539 NULL
110600 +get_ohm_of_thermistor_61545 get_ohm_of_thermistor 2 61545 NULL
110601 +trace_options_core_write_61551 trace_options_core_write 3 61551 NULL
110602 +o2hb_pop_count_61553 o2hb_pop_count 2 61553 NULL
110603 +dvb_net_ioctl_61559 dvb_net_ioctl 2 61559 NULL
110604 +parport_pc_fifo_write_block_dma_61568 parport_pc_fifo_write_block_dma 3 61568 NULL
110605 +fan_proc_write_61569 fan_proc_write 3 61569 NULL
110606 +ieee80211_if_read_rc_rateidx_mask_2ghz_61570 ieee80211_if_read_rc_rateidx_mask_2ghz 3 61570 NULL
110607 +ldlm_pool_rw_atomic_seq_write_61572 ldlm_pool_rw_atomic_seq_write 3 61572 NULL
110608 +seq_open_private_61589 seq_open_private 3 61589 NULL
110609 +ept_gpte_to_gfn_lvl_61591 ept_gpte_to_gfn_lvl 0-2-1 61591 NULL
110610 +netlink_recvmsg_61600 netlink_recvmsg 4 61600 NULL
110611 +nfs4_init_uniform_client_string_61601 nfs4_init_uniform_client_string 3 61601 NULL
110612 +configfs_write_file_61621 configfs_write_file 3 61621 NULL
110613 +ieee80211_if_fmt_hw_queues_61629 ieee80211_if_fmt_hw_queues 3 61629 NULL
110614 +i2o_parm_table_get_61635 i2o_parm_table_get 6 61635 NULL
110615 +snd_pcm_oss_read3_61643 snd_pcm_oss_read3 0-3 61643 NULL
110616 +resize_stripes_61650 resize_stripes 2 61650 NULL
110617 +ttm_page_pool_free_61661 ttm_page_pool_free 2-0 61661 NULL
110618 +insert_one_name_61668 insert_one_name 7 61668 NULL
110619 +snd_pcm_playback_avail_61671 snd_pcm_playback_avail 0 61671 NULL
110620 +qib_format_hwmsg_61679 qib_format_hwmsg 2 61679 NULL
110621 +lock_loop_61681 lock_loop 1 61681 NULL
110622 +__do_tune_cpucache_61684 __do_tune_cpucache 2 61684 NULL
110623 +filter_read_61692 filter_read 3 61692 NULL
110624 +prog_page_61711 prog_page 2 61711 NULL
110625 +iov_length_61716 iov_length 0 61716 NULL
110626 +fragmentation_threshold_read_61718 fragmentation_threshold_read 3 61718 NULL
110627 +null_alloc_reqbuf_61719 null_alloc_reqbuf 3 61719 NULL
110628 +read_file_interrupt_61742 read_file_interrupt 3 61742 NULL nohasharray
110629 +read_file_regval_61742 read_file_regval 3 61742 &read_file_interrupt_61742
110630 +SyS_sendto_61763 SyS_sendto 6 61763 NULL
110631 +gfs2_meta_wait_61773 gfs2_meta_wait 0 61773 NULL
110632 +num_counter_active_61789 num_counter_active 0 61789 NULL
110633 +mls_compute_context_len_61812 mls_compute_context_len 0 61812 NULL
110634 +tps80031_irq_init_61830 tps80031_irq_init 3 61830 NULL
110635 +bfad_debugfs_write_regwr_61841 bfad_debugfs_write_regwr 3 61841 NULL
110636 +regcache_sync_block_61846 regcache_sync_block 4-5 61846 NULL
110637 +fs_path_prepare_for_add_61854 fs_path_prepare_for_add 2-0 61854 NULL
110638 +evdev_compute_buffer_size_61863 evdev_compute_buffer_size 0 61863 NULL
110639 +SYSC_lsetxattr_61869 SYSC_lsetxattr 4 61869 NULL
110640 +get_fw_name_61874 get_fw_name 3 61874 NULL
110641 +twl4030_sih_setup_61878 twl4030_sih_setup 3-0 61878 NULL
110642 +btrfs_ioctl_clone_61886 btrfs_ioctl_clone 4-3-5 61886 NULL
110643 +lprocfs_write_frac_u64_helper_61897 lprocfs_write_frac_u64_helper 2 61897 NULL
110644 +lov_mds_md_stripecnt_61899 lov_mds_md_stripecnt 0-1 61899 NULL
110645 +clear_refs_write_61904 clear_refs_write 3 61904 NULL
110646 +rx_filter_arp_filter_read_61914 rx_filter_arp_filter_read 3 61914 NULL
110647 +au0828_init_isoc_61917 au0828_init_isoc 3-2-4 61917 NULL
110648 +sctp_sendmsg_61919 sctp_sendmsg 4 61919 NULL
110649 +ocfs2_reserve_new_metadata_blocks_61926 ocfs2_reserve_new_metadata_blocks 0 61926 NULL
110650 +SyS_kexec_load_61946 SyS_kexec_load 2 61946 NULL
110651 +gfn_to_pfn_memslot_atomic_61947 gfn_to_pfn_memslot_atomic 2 61947 NULL
110652 +il4965_ucode_rx_stats_read_61948 il4965_ucode_rx_stats_read 3 61948 NULL
110653 +squashfs_read_id_index_table_61961 squashfs_read_id_index_table 4 61961 NULL
110654 +fix_read_error_61965 fix_read_error 4 61965 NULL
110655 +mlx4_alloc_mtt_range_61966 mlx4_alloc_mtt_range 2 61966 NULL
110656 +ocfs2_quota_write_61972 ocfs2_quota_write 5-4 61972 NULL
110657 +fd_locked_ioctl_61978 fd_locked_ioctl 3 61978 NULL
110658 +cow_file_range_61979 cow_file_range 3 61979 NULL
110659 +module_alloc_exec_61991 module_alloc_exec 1 61991 NULL
110660 +dequeue_event_62000 dequeue_event 3 62000 NULL
110661 +xt_compat_match_offset_62011 xt_compat_match_offset 0 62011 NULL
110662 +SyS_setxattr_62019 SyS_setxattr 4 62019 NULL
110663 +jffs2_do_unlink_62020 jffs2_do_unlink 4 62020 NULL
110664 +SYSC_select_62024 SYSC_select 1 62024 NULL
110665 +pmcraid_build_passthrough_ioadls_62034 pmcraid_build_passthrough_ioadls 2 62034 NULL
110666 +sctp_user_addto_chunk_62047 sctp_user_addto_chunk 2-3 62047 NULL
110667 +do_pselect_62061 do_pselect 1 62061 NULL
110668 +pcpu_alloc_bootmem_62074 pcpu_alloc_bootmem 2 62074 NULL
110669 +get_domain_for_dev_62099 get_domain_for_dev 2 62099 NULL
110670 +ipath_user_sdma_pin_pages_62100 ipath_user_sdma_pin_pages 3-5-4 62100 NULL
110671 +jffs2_security_setxattr_62107 jffs2_security_setxattr 4 62107 NULL
110672 +btrfs_direct_IO_62114 btrfs_direct_IO 4 62114 NULL
110673 +ip_recv_error_62117 ip_recv_error 3 62117 NULL
110674 +generic_block_fiemap_62122 generic_block_fiemap 4 62122 NULL
110675 +llc_ui_header_len_62131 llc_ui_header_len 0 62131 NULL
110676 +qib_diag_write_62133 qib_diag_write 3 62133 NULL nohasharray
110677 +kobject_add_varg_62133 kobject_add_varg 0 62133 &qib_diag_write_62133
110678 +ql_status_62135 ql_status 5 62135 NULL nohasharray
110679 +device_add_attrs_62135 device_add_attrs 0 62135 &ql_status_62135
110680 +video_usercopy_62151 video_usercopy 2 62151 NULL
110681 +wrmWithLock_62164 wrmWithLock 0 62164 NULL
110682 +SyS_getxattr_62166 SyS_getxattr 4 62166 NULL
110683 +prism54_wpa_bss_ie_get_62173 prism54_wpa_bss_ie_get 0 62173 NULL
110684 +write_file_dfs_62180 write_file_dfs 3 62180 NULL
110685 +alloc_upcall_62186 alloc_upcall 2 62186 NULL
110686 +btrfs_xattr_acl_set_62203 btrfs_xattr_acl_set 4 62203 NULL
110687 +sock_kmalloc_62205 sock_kmalloc 2 62205 NULL
110688 +SYSC_setgroups16_62232 SYSC_setgroups16 1 62232 NULL
110689 +nfsd_read_file_62241 nfsd_read_file 6 62241 NULL
110690 +allocate_partition_62245 allocate_partition 4 62245 NULL
110691 +security_file_permission_62278 security_file_permission 0 62278 NULL
110692 +get_random_int_62279 get_random_int 0 62279 NULL
110693 +__qib_get_user_pages_62287 __qib_get_user_pages 1-2 62287 NULL
110694 +il_dbgfs_sram_read_62296 il_dbgfs_sram_read 3 62296 NULL
110695 +sparse_early_usemaps_alloc_pgdat_section_62304 sparse_early_usemaps_alloc_pgdat_section 2 62304 NULL
110696 +ocfs2_find_victim_alloc_group_62306 ocfs2_find_victim_alloc_group 0 62306 NULL
110697 +subsystem_filter_read_62310 subsystem_filter_read 3 62310 NULL
110698 +timespec_to_jiffies_62321 timespec_to_jiffies 0 62321 NULL
110699 +Wb35Reg_BurstWrite_62327 Wb35Reg_BurstWrite 4 62327 NULL
110700 +subseq_list_62332 subseq_list 3-0 62332 NULL
110701 +ll_statahead_max_seq_write_62333 ll_statahead_max_seq_write 3 62333 NULL
110702 +flash_write_62354 flash_write 3 62354 NULL
110703 +xfpregs_set_62363 xfpregs_set 4 62363 NULL
110704 +rx_rx_timeout_read_62389 rx_rx_timeout_read 3 62389 NULL
110705 +altera_irscan_62396 altera_irscan 2 62396 NULL
110706 +set_ssp_62411 set_ssp 4 62411 NULL
110707 +mlx4_en_create_rx_ring_62498 mlx4_en_create_rx_ring 3 62498 NULL
110708 +ext_rts51x_sd_execute_read_data_62501 ext_rts51x_sd_execute_read_data 9 62501 NULL
110709 +mtip_get_next_rr_node_62502 mtip_get_next_rr_node 0 62502 NULL
110710 +ocfs2_path_bh_journal_access_62504 ocfs2_path_bh_journal_access 0 62504 NULL
110711 +pep_sendmsg_62524 pep_sendmsg 4 62524 NULL
110712 +test_iso_queue_62534 test_iso_queue 5 62534 NULL nohasharray
110713 +__kmalloc_node_track_caller_62534 __kmalloc_node_track_caller 3 62534 &test_iso_queue_62534
110714 +debugfs_read_62535 debugfs_read 3 62535 NULL
110715 +sco_sock_sendmsg_62542 sco_sock_sendmsg 4 62542 NULL
110716 +qib_refresh_qsfp_cache_62547 qib_refresh_qsfp_cache 0 62547 NULL
110717 +xfrm_user_policy_62573 xfrm_user_policy 4 62573 NULL
110718 +get_subdir_62581 get_subdir 3 62581 NULL
110719 +nfsd_vfs_read_62605 nfsd_vfs_read 6 62605 NULL
110720 +get_desc_base_62617 get_desc_base 0 62617 NULL
110721 +iommu_area_alloc_62619 iommu_area_alloc 2-3-4-7-0 62619 NULL
110722 +vfs_fsync_range_62635 vfs_fsync_range 0 62635 NULL
110723 +lpfc_sli4_queue_alloc_62646 lpfc_sli4_queue_alloc 3 62646 NULL
110724 +write_62671 write 3 62671 NULL
110725 +ocfs2_wait_for_mask_interruptible_62675 ocfs2_wait_for_mask_interruptible 0 62675 NULL
110726 +printer_req_alloc_62687 printer_req_alloc 2 62687 NULL
110727 +ext4_ind_map_blocks_62690 ext4_ind_map_blocks 0 62690 NULL
110728 +htb_remove_class_from_row_62692 htb_remove_class_from_row 3 62692 NULL
110729 +qla4_83xx_rd_reg_62693 qla4_83xx_rd_reg 0 62693 NULL
110730 +bioset_integrity_create_62708 bioset_integrity_create 2 62708 NULL
110731 +gfs2_log_write_62717 gfs2_log_write 3 62717 NULL
110732 +rdm_62719 rdm 0 62719 NULL
110733 +obd_ioctl_popdata_62741 obd_ioctl_popdata 3 62741 NULL
110734 +key_replays_read_62746 key_replays_read 3 62746 NULL
110735 +lov_verify_lmm_62747 lov_verify_lmm 2 62747 NULL
110736 +mwifiex_rdeeprom_write_62754 mwifiex_rdeeprom_write 3 62754 NULL
110737 +ax25_sendmsg_62770 ax25_sendmsg 4 62770 NULL
110738 +page_key_alloc_62771 page_key_alloc 0 62771 NULL
110739 +C_SYSC_ipc_62776 C_SYSC_ipc 3 62776 NULL
110740 +SyS_sched_getaffinity_62786 SyS_sched_getaffinity 2 62786 NULL
110741 +dm_stats_account_io_62787 dm_stats_account_io 3 62787 NULL
110742 +posix_acl_valid_62788 posix_acl_valid 0 62788 NULL
110743 +is_first_ref_62805 is_first_ref 0 62805 NULL
110744 +tracing_total_entries_read_62817 tracing_total_entries_read 3 62817 NULL
110745 +name_cache_insert_62822 name_cache_insert 0 62822 NULL
110746 +BeceemEEPROMBulkRead_62835 BeceemEEPROMBulkRead 0 62835 NULL
110747 +__rounddown_pow_of_two_62836 __rounddown_pow_of_two 0 62836 NULL
110748 +bio_get_nr_vecs_62838 bio_get_nr_vecs 0 62838 NULL
110749 +xlog_recover_add_to_trans_62839 xlog_recover_add_to_trans 4 62839 NULL
110750 +rx_fcs_err_read_62844 rx_fcs_err_read 3 62844 NULL
110751 +set_swbp_62853 set_swbp 3 62853 NULL
110752 +read_nic_io_dword_62859 read_nic_io_dword 0 62859 NULL
110753 +l2tp_ip6_recvmsg_62874 l2tp_ip6_recvmsg 4 62874 NULL
110754 +aoechr_write_62883 aoechr_write 3 62883 NULL
110755 +if_spi_host_to_card_62890 if_spi_host_to_card 4 62890 NULL
110756 +ocfs2_validate_gd_parent_62905 ocfs2_validate_gd_parent 0 62905 NULL
110757 +mempool_create_slab_pool_62907 mempool_create_slab_pool 1 62907 NULL
110758 +getdqbuf_62908 getdqbuf 1 62908 NULL
110759 +try_async_pf_62914 try_async_pf 3 62914 NULL nohasharray
110760 +SyS_remap_file_pages_62914 SyS_remap_file_pages 1-2 62914 &try_async_pf_62914
110761 +ll_statahead_agl_seq_write_62928 ll_statahead_agl_seq_write 3 62928 NULL
110762 +atomic64_sub_return_62941 atomic64_sub_return 1 62941 NULL
110763 +agp_create_user_memory_62955 agp_create_user_memory 1 62955 NULL
110764 +send_write_62969 send_write 0-3 62969 NULL
110765 +__ext3_journal_stop_63017 __ext3_journal_stop 0 63017 NULL
110766 +alloc_mem_cgroup_per_zone_info_63024 alloc_mem_cgroup_per_zone_info 2 63024 NULL
110767 +kstrtoull_from_user_63026 kstrtoull_from_user 2 63026 NULL
110768 +PTR_ERR_63033 PTR_ERR 0 63033 NULL nohasharray
110769 +__vb2_perform_fileio_63033 __vb2_perform_fileio 3 63033 &PTR_ERR_63033
110770 +pipeline_defrag_to_csum_swi_read_63037 pipeline_defrag_to_csum_swi_read 3 63037 NULL
110771 +scsi_host_alloc_63041 scsi_host_alloc 2 63041 NULL
110772 +run_delayed_tree_ref_63042 run_delayed_tree_ref 0 63042 NULL
110773 +unlink1_63059 unlink1 3 63059 NULL
110774 +__do_munmap_63063 __do_munmap 0 63063 NULL
110775 +xen_set_nslabs_63066 xen_set_nslabs 0 63066 NULL
110776 +iwl_dbgfs_fw_rx_stats_read_63070 iwl_dbgfs_fw_rx_stats_read 3 63070 NULL
110777 +ocfs2_decrease_refcount_63078 ocfs2_decrease_refcount 4-3-0 63078 NULL
110778 +find_extent_in_eb_63082 find_extent_in_eb 0 63082 NULL
110779 +sep_prepare_input_output_dma_table_in_dcb_63087 sep_prepare_input_output_dma_table_in_dcb 4-5-3-2 63087 NULL
110780 +__ocfs2_flush_truncate_log_63097 __ocfs2_flush_truncate_log 0 63097 NULL
110781 +iwl_dbgfs_sensitivity_read_63116 iwl_dbgfs_sensitivity_read 3 63116 NULL
110782 +ext4_chunk_trans_blocks_63123 ext4_chunk_trans_blocks 0-2 63123 NULL
110783 +alloc_cblock_63133 alloc_cblock 2 63133 NULL
110784 +snd_pcm_status_user_63140 snd_pcm_status_user 0 63140 NULL
110785 +ubifs_change_one_lp_63157 ubifs_change_one_lp 0 63157 NULL
110786 +smk_write_revoke_subj_63173 smk_write_revoke_subj 3 63173 NULL
110787 +SyS_syslog_63178 SyS_syslog 3 63178 NULL
110788 +security_policydb_len_63198 security_policydb_len 0 63198 NULL
110789 +vme_master_read_63221 vme_master_read 0 63221 NULL
110790 +SyS_gethostname_63227 SyS_gethostname 2 63227 NULL
110791 +module_alloc_update_bounds_rw_63233 module_alloc_update_bounds_rw 1 63233 NULL
110792 +ptp_read_63251 ptp_read 4 63251 NULL
110793 +xfs_dir2_leaf_getdents_63262 xfs_dir2_leaf_getdents 3 63262 NULL
110794 +ntfs_attr_can_be_non_resident_63267 ntfs_attr_can_be_non_resident 0 63267 NULL
110795 +raid5_resize_63306 raid5_resize 2 63306 NULL
110796 +proc_info_read_63344 proc_info_read 3 63344 NULL
110797 +ps_upsd_max_sptime_read_63362 ps_upsd_max_sptime_read 3 63362 NULL
110798 +idmouse_read_63374 idmouse_read 3 63374 NULL
110799 +edac_pci_alloc_ctl_info_63388 edac_pci_alloc_ctl_info 1 63388 NULL nohasharray
110800 +usbnet_read_cmd_nopm_63388 usbnet_read_cmd_nopm 7 63388 &edac_pci_alloc_ctl_info_63388
110801 +rxpipe_missed_beacon_host_int_trig_rx_data_read_63405 rxpipe_missed_beacon_host_int_trig_rx_data_read 3 63405 NULL
110802 +nouveau_event_create_63411 nouveau_event_create 1 63411 NULL
110803 +l2cap_sock_sendmsg_63427 l2cap_sock_sendmsg 4 63427 NULL
110804 +sep_prepare_input_output_dma_table_63429 sep_prepare_input_output_dma_table 3-4-2 63429 NULL
110805 +gfn_to_hva_many_63437 gfn_to_hva_many 2-0 63437 NULL
110806 +nfsd_symlink_63442 nfsd_symlink 6 63442 NULL
110807 +lookup_dir_item_inode_63447 lookup_dir_item_inode 0 63447 NULL
110808 +i915_gem_object_get_fence_63455 i915_gem_object_get_fence 0 63455 NULL
110809 +__do_kmalloc_node_63461 __do_kmalloc_node 3 63461 NULL
110810 +si5351_bulk_write_63468 si5351_bulk_write 2-3 63468 NULL
110811 +snd_info_entry_write_63474 snd_info_entry_write 3 63474 NULL
110812 +do_work_63483 do_work 0 63483 NULL
110813 +reada_find_extent_63486 reada_find_extent 2 63486 NULL
110814 +__copy_from_user_ll_nocache_63487 __copy_from_user_ll_nocache 0-3 63487 NULL
110815 +read_kcore_63488 read_kcore 3 63488 NULL nohasharray
110816 +alloc_reserved_tree_block_63488 alloc_reserved_tree_block 0 63488 &read_kcore_63488
110817 +save_hint_63497 save_hint 2 63497 NULL
110818 +wl1271_tx_min_rate_get_63498 wl1271_tx_min_rate_get 2 63498 NULL
110819 +sis_voice_irq_63501 sis_voice_irq 1 63501 NULL
110820 +snd_pcm_plug_write_transfer_63503 snd_pcm_plug_write_transfer 0-3 63503 NULL
110821 +efx_mcdi_rpc_async_63529 efx_mcdi_rpc_async 5-4 63529 NULL
110822 +ubi_more_leb_change_data_63534 ubi_more_leb_change_data 4-0 63534 NULL
110823 +write_file_spectral_period_63536 write_file_spectral_period 3 63536 NULL
110824 +if_sdio_read_scratch_63540 if_sdio_read_scratch 0 63540 NULL
110825 +append_to_buffer_63550 append_to_buffer 3 63550 NULL
110826 +dbg_leb_write_63555 dbg_leb_write 4-5 63555 NULL nohasharray
110827 +kvm_write_guest_page_63555 kvm_write_guest_page 2-5 63555 &dbg_leb_write_63555
110828 +ubifs_lpt_scan_nolock_63572 ubifs_lpt_scan_nolock 0 63572 NULL
110829 +iwch_reg_user_mr_63575 iwch_reg_user_mr 2-3 63575 NULL
110830 +ocfs2_calc_trunc_pos_63576 ocfs2_calc_trunc_pos 4 63576 NULL
110831 +rproc_alloc_63577 rproc_alloc 5 63577 NULL
110832 +ext3_clear_blocks_63597 ext3_clear_blocks 4-5 63597 NULL
110833 +write_debug_level_63613 write_debug_level 3 63613 NULL
110834 +module_alloc_63630 module_alloc 1 63630 NULL
110835 +ntfs_malloc_nofs_nofail_63631 ntfs_malloc_nofs_nofail 1 63631 NULL
110836 +symbol_build_supp_rates_63634 symbol_build_supp_rates 0 63634 NULL
110837 +_ubh_find_next_zero_bit__63640 _ubh_find_next_zero_bit_ 3-5-4 63640 NULL
110838 +ext4_ext_get_access_63642 ext4_ext_get_access 0 63642 NULL
110839 +proc_loginuid_write_63648 proc_loginuid_write 3 63648 NULL
110840 +ValidateDSDParamsChecksum_63654 ValidateDSDParamsChecksum 3 63654 NULL
110841 +ldlm_cli_enqueue_63657 ldlm_cli_enqueue 8 63657 NULL
110842 +hidraw_ioctl_63658 hidraw_ioctl 2 63658 NULL
110843 +vbi_read_63673 vbi_read 3 63673 NULL nohasharray
110844 +xen_register_pirq_63673 xen_register_pirq 1-2 63673 &vbi_read_63673
110845 +write_file_spectral_fft_period_63696 write_file_spectral_fft_period 3 63696 NULL
110846 +bin_search_63697 bin_search 0 63697 NULL
110847 +arizona_irq_map_63709 arizona_irq_map 2 63709 NULL
110848 +ocfs2_et_root_journal_access_63713 ocfs2_et_root_journal_access 0 63713 NULL
110849 +nouveau_object_create__63715 nouveau_object_create_ 5 63715 NULL
110850 +btrfs_insert_delayed_dir_index_63720 btrfs_insert_delayed_dir_index 4-0 63720 NULL
110851 +selinux_secctx_to_secid_63744 selinux_secctx_to_secid 2 63744 NULL
110852 +snd_pcm_oss_read1_63771 snd_pcm_oss_read1 3 63771 NULL
110853 +snd_pcm_link_63772 snd_pcm_link 0 63772 NULL
110854 +snd_opl4_mem_proc_read_63774 snd_opl4_mem_proc_read 5 63774 NULL
110855 +spidev_compat_ioctl_63778 spidev_compat_ioctl 2 63778 NULL
110856 +mwifiex_11n_create_rx_reorder_tbl_63806 mwifiex_11n_create_rx_reorder_tbl 4 63806 NULL
110857 +copy_nodes_to_user_63807 copy_nodes_to_user 2 63807 NULL
110858 +prepare_copy_63826 prepare_copy 2 63826 NULL
110859 +sel_write_load_63830 sel_write_load 3 63830 NULL
110860 +ll_readlink_63836 ll_readlink 3 63836 NULL
110861 +IsSectionWritable_63842 IsSectionWritable 0 63842 NULL
110862 +proc_pid_attr_write_63845 proc_pid_attr_write 3 63845 NULL
110863 +divas_write_63901 divas_write 3 63901 NULL
110864 +IsOffsetWritable_63902 IsOffsetWritable 0 63902 NULL nohasharray
110865 +xhci_alloc_stream_info_63902 xhci_alloc_stream_info 3 63902 &IsOffsetWritable_63902
110866 +uvc_alloc_urb_buffers_63922 uvc_alloc_urb_buffers 0-2-3 63922 NULL
110867 +snd_compr_write_63923 snd_compr_write 3 63923 NULL
110868 +acpi_ev_get_gpe_xrupt_block_63924 acpi_ev_get_gpe_xrupt_block 1 63924 NULL
110869 +tipc_send2port_63935 tipc_send2port 5 63935 NULL
110870 +afs_send_simple_reply_63940 afs_send_simple_reply 3 63940 NULL
110871 +__team_options_register_63941 __team_options_register 3 63941 NULL
110872 +macvtap_recvmsg_63949 macvtap_recvmsg 4 63949 NULL
110873 +generic_acl_init_63955 generic_acl_init 0 63955 NULL
110874 +domain_pfn_mapping_63957 domain_pfn_mapping 4 63957 NULL
110875 +diva_xdi_write_63975 diva_xdi_write 4 63975 NULL
110876 +rs_extent_to_bm_page_63996 rs_extent_to_bm_page 0-1 63996 NULL
110877 +read_file_frameerrors_64001 read_file_frameerrors 3 64001 NULL
110878 +hfsplus_security_setxattr_64009 hfsplus_security_setxattr 4 64009 NULL
110879 +C_SYSC_sendfile64_64017 C_SYSC_sendfile64 4 64017 NULL
110880 +SyS_rt_sigpending_64018 SyS_rt_sigpending 2 64018 NULL
110881 +offset_to_vaddr_64025 offset_to_vaddr 0-2 64025 NULL
110882 +dbAllocDmapLev_64030 dbAllocDmapLev 0 64030 NULL
110883 +resize_async_buffer_64031 resize_async_buffer 4 64031 NULL
110884 +SyS_fsetxattr_64039 SyS_fsetxattr 4 64039 NULL
110885 +tfrc_calc_x_reverse_lookup_64057 tfrc_calc_x_reverse_lookup 0 64057 NULL
110886 +__sock_create_64069 __sock_create 0 64069 NULL
110887 +get_u8_64076 get_u8 0 64076 NULL
110888 +xilly_malloc_64077 xilly_malloc 2 64077 NULL
110889 +btrfs_copy_root_64079 btrfs_copy_root 0 64079 NULL
110890 +sl_realloc_bufs_64086 sl_realloc_bufs 2 64086 NULL
110891 +clear_update_marker_64088 clear_update_marker 0 64088 NULL nohasharray
110892 +vmci_handle_arr_get_size_64088 vmci_handle_arr_get_size 0 64088 &clear_update_marker_64088
110893 +lbs_highrssi_read_64089 lbs_highrssi_read 3 64089 NULL
110894 +SyS_set_mempolicy_64096 SyS_set_mempolicy 3 64096 NULL
110895 +SyS_mq_timedsend_64107 SyS_mq_timedsend 3 64107 NULL
110896 +rdma_addr_size_64116 rdma_addr_size 0 64116 NULL
110897 +do_load_xattr_datum_64118 do_load_xattr_datum 0 64118 NULL
110898 +bypass_wd_write_64120 bypass_wd_write 3 64120 NULL
110899 +ol_quota_entries_per_block_64122 ol_quota_entries_per_block 0 64122 NULL
110900 +ext4_prepare_inline_data_64124 ext4_prepare_inline_data 3 64124 NULL
110901 +init_bch_64130 init_bch 2-1 64130 NULL
110902 +SYSC_ptrace_64136 SYSC_ptrace 3 64136 NULL
110903 +ablkcipher_copy_iv_64140 ablkcipher_copy_iv 3 64140 NULL
110904 +dlfb_ops_write_64150 dlfb_ops_write 3 64150 NULL
110905 +__comedi_buf_alloc_64155 __comedi_buf_alloc 3 64155 NULL
110906 +cpumask_scnprintf_64170 cpumask_scnprintf 0-2 64170 NULL
110907 +ocfs2_reserve_blocks_for_rec_trunc_64206 ocfs2_reserve_blocks_for_rec_trunc 0 64206 NULL
110908 +xfs_vm_direct_IO_64223 xfs_vm_direct_IO 4 64223 NULL
110909 +read_pulse_64227 read_pulse 0-3 64227 NULL
110910 +ea_len_64229 ea_len 0 64229 NULL
110911 +btrfs_make_block_group_64241 btrfs_make_block_group 0 64241 NULL
110912 +__btrfs_update_delayed_inode_64248 __btrfs_update_delayed_inode 0 64248 NULL
110913 +io_capture_transfer_64276 io_capture_transfer 4 64276 NULL
110914 +btrfs_file_extent_offset_64278 btrfs_file_extent_offset 0 64278 NULL
110915 +btrfs_next_item_64285 btrfs_next_item 0 64285 NULL
110916 +sta_current_tx_rate_read_64286 sta_current_tx_rate_read 3 64286 NULL
110917 +event_id_read_64288 event_id_read 3 64288 NULL nohasharray
110918 +xfs_dir_cilookup_result_64288 xfs_dir_cilookup_result 3 64288 &event_id_read_64288
110919 +ocfs2_block_check_validate_bhs_64302 ocfs2_block_check_validate_bhs 0 64302 NULL
110920 +snd_hda_get_sub_nodes_64304 snd_hda_get_sub_nodes 0 64304 NULL
110921 +error_error_bar_retry_read_64305 error_error_bar_retry_read 3 64305 NULL
110922 +ffz_64324 ffz 0-1 64324 NULL
110923 +map_region_64328 map_region 1 64328 NULL
110924 +sisusbcon_clear_64329 sisusbcon_clear 4-3-5 64329 NULL
110925 +ts_write_64336 ts_write 3 64336 NULL
110926 +usbtmc_write_64340 usbtmc_write 3 64340 NULL
110927 +do_write_orph_node_64343 do_write_orph_node 2 64343 NULL
110928 +ft1000_read_reg_64352 ft1000_read_reg 0 64352 NULL
110929 +bnx2x_vfop_mcast_cmd_64354 bnx2x_vfop_mcast_cmd 5 64354 NULL
110930 +user_regset_copyin_64360 user_regset_copyin 7 64360 NULL
110931 +wlc_phy_loadsampletable_nphy_64367 wlc_phy_loadsampletable_nphy 3 64367 NULL
110932 +reg_create_64372 reg_create 5 64372 NULL
110933 +ilo_write_64378 ilo_write 3 64378 NULL
110934 +btrfs_map_block_64379 btrfs_map_block 3 64379 NULL
110935 +vmcs_readl_64381 vmcs_readl 0 64381 NULL
110936 +nilfs_alloc_seg_bio_64383 nilfs_alloc_seg_bio 3 64383 NULL
110937 +ir_lirc_transmit_ir_64403 ir_lirc_transmit_ir 3 64403 NULL
110938 +pidlist_allocate_64404 pidlist_allocate 1 64404 NULL
110939 +rx_hdr_overflow_read_64407 rx_hdr_overflow_read 3 64407 NULL
110940 +snd_card_create_64418 snd_card_create 4 64418 NULL nohasharray
110941 +keyctl_get_security_64418 keyctl_get_security 3 64418 &snd_card_create_64418
110942 +nl80211_send_mgmt_64419 nl80211_send_mgmt 7 64419 NULL
110943 +oom_adj_write_64428 oom_adj_write 3 64428 NULL
110944 +ext4_trim_extent_64431 ext4_trim_extent 4 64431 NULL nohasharray
110945 +read_file_spectral_short_repeat_64431 read_file_spectral_short_repeat 3 64431 &ext4_trim_extent_64431
110946 +ax25_recvmsg_64441 ax25_recvmsg 4 64441 NULL
110947 +single_open_size_64483 single_open_size 4 64483 NULL
110948 +p54_parse_rssical_64493 p54_parse_rssical 3 64493 NULL
110949 +msg_data_sz_64503 msg_data_sz 0 64503 NULL
110950 +remove_uuid_64505 remove_uuid 4 64505 NULL nohasharray
110951 +handle_abnormal_pfn_64505 handle_abnormal_pfn 3 64505 &remove_uuid_64505
110952 +crypto_blkcipher_alignmask_64520 crypto_blkcipher_alignmask 0 64520 NULL
110953 +opera1_usb_i2c_msgxfer_64521 opera1_usb_i2c_msgxfer 4 64521 NULL
110954 +iwl_dbgfs_ucode_tracing_write_64524 iwl_dbgfs_ucode_tracing_write 3 64524 NULL
110955 +ses_send_diag_64527 ses_send_diag 4 64527 NULL
110956 +prctl_set_mm_64538 prctl_set_mm 3 64538 NULL
110957 +SyS_bind_64544 SyS_bind 3 64544 NULL
110958 +rbd_obj_read_sync_64554 rbd_obj_read_sync 4-3 64554 NULL
110959 +__btrfs_prealloc_file_range_64557 __btrfs_prealloc_file_range 3-0 64557 NULL
110960 +__spi_sync_64561 __spi_sync 0 64561 NULL nohasharray
110961 +ll_max_rw_chunk_seq_write_64561 ll_max_rw_chunk_seq_write 3 64561 &__spi_sync_64561
110962 +__apei_exec_run_64563 __apei_exec_run 0 64563 NULL
110963 +kstrtoul_from_user_64569 kstrtoul_from_user 2 64569 NULL
110964 +do_erase_64574 do_erase 4 64574 NULL
110965 +fanotify_write_64623 fanotify_write 3 64623 NULL
110966 +ocfs2_remove_refcount_extent_64631 ocfs2_remove_refcount_extent 0 64631 NULL
110967 +to_dblock_64655 to_dblock 0-1 64655 NULL
110968 +regmap_read_debugfs_64658 regmap_read_debugfs 5 64658 NULL
110969 +ocfs2_read_xattr_block_64661 ocfs2_read_xattr_block 0 64661 NULL nohasharray
110970 +tlbflush_read_file_64661 tlbflush_read_file 3 64661 &ocfs2_read_xattr_block_64661 nohasharray
110971 +pool_create_64661 pool_create 3 64661 &tlbflush_read_file_64661
110972 +efx_tsoh_get_buffer_64664 efx_tsoh_get_buffer 3 64664 NULL
110973 +rx_rx_out_of_mpdu_nodes_read_64668 rx_rx_out_of_mpdu_nodes_read 3 64668 NULL
110974 +nr_free_zone_pages_64680 nr_free_zone_pages 0 64680 NULL
110975 +sec_bulk_write_64691 sec_bulk_write 2-3 64691 NULL nohasharray
110976 +mremap_to_64691 mremap_to 0-3 64691 &sec_bulk_write_64691
110977 +pfn_to_hpa_64703 pfn_to_hpa 0-1 64703 NULL
110978 +ip_select_ident_more_64707 ip_select_ident_more 4 64707 NULL
110979 +snd_pcm_oss_capture_position_fixup_64713 snd_pcm_oss_capture_position_fixup 0 64713 NULL
110980 +dapm_bias_read_file_64715 dapm_bias_read_file 3 64715 NULL
110981 +atomic_add_return_64720 atomic_add_return 0-1 64720 NULL
110982 +i2400m_msg_to_dev_64722 i2400m_msg_to_dev 3 64722 NULL
110983 +AscGetChipVersion_64737 AscGetChipVersion 0 64737 NULL
110984 +squashfs_read_inode_lookup_table_64739 squashfs_read_inode_lookup_table 4 64739 NULL
110985 +perf_swevent_event_64745 perf_swevent_event 2 64745 NULL
110986 +bio_map_kern_64751 bio_map_kern 3 64751 NULL
110987 +rt2x00debug_write_csr_64753 rt2x00debug_write_csr 3 64753 NULL
110988 +message_for_md_64777 message_for_md 5 64777 NULL
110989 +isr_low_rssi_read_64789 isr_low_rssi_read 3 64789 NULL
110990 +regmap_reg_ranges_read_file_64798 regmap_reg_ranges_read_file 3 64798 NULL
110991 +nfsctl_transaction_write_64800 nfsctl_transaction_write 3 64800 NULL
110992 +rfkill_fop_write_64808 rfkill_fop_write 3 64808 NULL
110993 +proc_projid_map_write_64810 proc_projid_map_write 3 64810 NULL
110994 +megaraid_change_queue_depth_64815 megaraid_change_queue_depth 2 64815 NULL
110995 +ecryptfs_send_miscdev_64816 ecryptfs_send_miscdev 2 64816 NULL
110996 +vaddr_get_pfn_64818 vaddr_get_pfn 1 64818 NULL
110997 +gfn_to_page_64826 gfn_to_page 2 64826 NULL
110998 +do_kimage_alloc_64827 do_kimage_alloc 3 64827 NULL
110999 +altera_set_dr_pre_64862 altera_set_dr_pre 2 64862 NULL
111000 +gfn_to_pfn_64870 gfn_to_pfn 2 64870 NULL
111001 +lprocfs_write_u64_helper_64880 lprocfs_write_u64_helper 2 64880 NULL
111002 +ffs_epfile_io_64886 ffs_epfile_io 3 64886 NULL
111003 +mk_pid_64894 mk_pid 0-3 64894 NULL
111004 +ieee80211_if_read_ave_beacon_64924 ieee80211_if_read_ave_beacon 3 64924 NULL
111005 +usb_reset_and_verify_device_64933 usb_reset_and_verify_device 0 64933 NULL
111006 +ubifs_wbuf_write_nolock_64946 ubifs_wbuf_write_nolock 3 64946 NULL
111007 +ip_options_get_from_user_64958 ip_options_get_from_user 4 64958 NULL
111008 +acpi_os_install_interrupt_handler_64968 acpi_os_install_interrupt_handler 1 64968 NULL
111009 +traceprobe_probes_write_64969 traceprobe_probes_write 3 64969 NULL
111010 +suspend_dtim_interval_read_64971 suspend_dtim_interval_read 3 64971 NULL
111011 +ext2_group_first_block_no_64972 ext2_group_first_block_no 0-2 64972 NULL
111012 +crypto_ahash_digestsize_65014 crypto_ahash_digestsize 0 65014 NULL
111013 +insert_dent_65034 insert_dent 7 65034 NULL
111014 +snd_hda_get_pin_label_65035 snd_hda_get_pin_label 5 65035 NULL
111015 +ext4_ind_trans_blocks_65053 ext4_ind_trans_blocks 0-2 65053 NULL
111016 +pcibios_enable_device_65059 pcibios_enable_device 0 65059 NULL
111017 +make_idx_node_65068 make_idx_node 0 65068 NULL
111018 +count_run_65072 count_run 0-2-4-5 65072 NULL
111019 +__alloc_bootmem_node_high_65076 __alloc_bootmem_node_high 2 65076 NULL
111020 +ocfs2_truncate_cluster_pages_65086 ocfs2_truncate_cluster_pages 2 65086 NULL
111021 +send_create_inode_65090 send_create_inode 0 65090 NULL nohasharray
111022 +ath9k_dump_mci_btcoex_65090 ath9k_dump_mci_btcoex 0 65090 &send_create_inode_65090
111023 +uasp_alloc_cmd_65097 uasp_alloc_cmd 0 65097 NULL
111024 +__ext3_journal_dirty_metadata_65103 __ext3_journal_dirty_metadata 0 65103 NULL
111025 +generic_ocp_write_65107 generic_ocp_write 4 65107 NULL
111026 +kswapd_shrink_zone_65174 kswapd_shrink_zone 4 65174 NULL
111027 +i2c_smbus_xfer_emulated_65183 i2c_smbus_xfer_emulated 0 65183 NULL
111028 +btrfs_run_delayed_items_nr_65204 btrfs_run_delayed_items_nr 0 65204 NULL
111029 +rx_rx_done_read_65217 rx_rx_done_read 3 65217 NULL
111030 +print_endpoint_stat_65232 print_endpoint_stat 3-4-0 65232 NULL
111031 +journal_get_write_access_65243 journal_get_write_access 0 65243 NULL
111032 +whci_n_caps_65247 whci_n_caps 0 65247 NULL
111033 +kmem_zalloc_greedy_65268 kmem_zalloc_greedy 3-2 65268 NULL
111034 +kmalloc_parameter_65279 kmalloc_parameter 1 65279 NULL
111035 +compat_core_sys_select_65285 compat_core_sys_select 1 65285 NULL
111036 +get_unaligned_le16_65293 get_unaligned_le16 0 65293 NULL
111037 +mpi_set_buffer_65294 mpi_set_buffer 3 65294 NULL
111038 +redirected_tty_write_65297 redirected_tty_write 3 65297 NULL
111039 +get_var_len_65304 get_var_len 0 65304 NULL
111040 +unpack_array_65318 unpack_array 0 65318 NULL
111041 +pci_vpd_find_tag_65325 pci_vpd_find_tag 0-2 65325 NULL
111042 +dccp_setsockopt_service_65336 dccp_setsockopt_service 4 65336 NULL
111043 +init_list_set_65351 init_list_set 3-2 65351 NULL
111044 +dma_rx_requested_read_65354 dma_rx_requested_read 3 65354 NULL
111045 +batadv_tt_save_orig_buffer_65361 batadv_tt_save_orig_buffer 4 65361 NULL
111046 +alloc_cpu_rmap_65363 alloc_cpu_rmap 1 65363 NULL
111047 +__ext4_new_inode_65370 __ext4_new_inode 5 65370 NULL
111048 +SyS_writev_65372 SyS_writev 3 65372 NULL
111049 +mi_set_context_65395 mi_set_context 0 65395 NULL
111050 +__alloc_bootmem_nopanic_65397 __alloc_bootmem_nopanic 1 65397 NULL
111051 +trace_seq_to_user_65398 trace_seq_to_user 3 65398 NULL
111052 +mtd_get_device_size_65400 mtd_get_device_size 0 65400 NULL
111053 +__read_vmcore_65402 __read_vmcore 2 65402 NULL
111054 +usb_ep_enable_65405 usb_ep_enable 0 65405 NULL
111055 +ocfs2_write_begin_nolock_65410 ocfs2_write_begin_nolock 4-3 65410 NULL
111056 +drm_calloc_large_65421 drm_calloc_large 2-1 65421 NULL
111057 +device_add_groups_65423 device_add_groups 0 65423 NULL
111058 +xpc_kzalloc_cacheline_aligned_65433 xpc_kzalloc_cacheline_aligned 1 65433 NULL
111059 +vmalloc_to_pfn_65437 vmalloc_to_pfn 0 65437 NULL
111060 +usb_alloc_coherent_65444 usb_alloc_coherent 2 65444 NULL
111061 +il_dbgfs_wd_timeout_write_65464 il_dbgfs_wd_timeout_write 3 65464 NULL
111062 +ext4_es_zeroout_65465 ext4_es_zeroout 0 65465 NULL
111063 +clear_user_65470 clear_user 2 65470 NULL
111064 +__pcibus_to_node_65489 __pcibus_to_node 0 65489 NULL nohasharray
111065 +dpcm_state_read_file_65489 dpcm_state_read_file 3 65489 &__pcibus_to_node_65489
111066 +lookup_inline_extent_backref_65493 lookup_inline_extent_backref 9-0 65493 NULL
111067 +qib_create_ctxtdata_65497 qib_create_ctxtdata 3 65497 NULL
111068 +nvme_trans_standard_inquiry_page_65526 nvme_trans_standard_inquiry_page 4 65526 NULL
111069 diff --git a/tools/gcc/size_overflow_plugin.c b/tools/gcc/size_overflow_plugin.c
111070 new file mode 100644
111071 index 0000000..5515dcb
111072 --- /dev/null
111073 +++ b/tools/gcc/size_overflow_plugin.c
111074 @@ -0,0 +1,3927 @@
111075 +/*
111076 + * Copyright 2011, 2012, 2013 by Emese Revfy <re.emese@gmail.com>
111077 + * Licensed under the GPL v2, or (at your option) v3
111078 + *
111079 + * Homepage:
111080 + * http://www.grsecurity.net/~ephox/overflow_plugin/
111081 + *
111082 + * Documentation:
111083 + * http://forums.grsecurity.net/viewtopic.php?f=7&t=3043
111084 + *
111085 + * This plugin recomputes expressions of function arguments marked by a size_overflow attribute
111086 + * with double integer precision (DImode/TImode for 32/64 bit integer types).
111087 + * The recomputed argument is checked against TYPE_MAX and an event is logged on overflow and the triggering process is killed.
111088 + *
111089 + * Usage:
111090 + * $ gcc -I`gcc -print-file-name=plugin`/include/c-family -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -ggdb -Wall -W -o size_overflow_plugin.so size_overflow_plugin.c
111091 + * $ gcc -fplugin=size_overflow_plugin.so test.c -O2
111092 + */
111093 +
111094 +#include "gcc-plugin.h"
111095 +#include "config.h"
111096 +#include "system.h"
111097 +#include "coretypes.h"
111098 +#include "tree.h"
111099 +#include "tree-pass.h"
111100 +#include "intl.h"
111101 +#include "plugin-version.h"
111102 +#include "tm.h"
111103 +#include "toplev.h"
111104 +#include "function.h"
111105 +#include "tree-flow.h"
111106 +#include "plugin.h"
111107 +#include "gimple.h"
111108 +#include "diagnostic.h"
111109 +#include "cfgloop.h"
111110 +
111111 +#if BUILDING_GCC_VERSION >= 4008
111112 +#define TODO_dump_func 0
111113 +#define TODO_dump_cgraph 0
111114 +#endif
111115 +
111116 +#define __unused __attribute__((__unused__))
111117 +#define ASM_NAME(node) IDENTIFIER_POINTER(DECL_ASSEMBLER_NAME(node))
111118 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
111119 +#define NAME_LEN(node) IDENTIFIER_LENGTH(DECL_NAME(node))
111120 +#define BEFORE_STMT true
111121 +#define AFTER_STMT false
111122 +#define CREATE_NEW_VAR NULL_TREE
111123 +#define CODES_LIMIT 32
111124 +#define MAX_PARAM 31
111125 +#define VEC_LEN 128
111126 +#define MY_STMT GF_PLF_1
111127 +#define NO_CAST_CHECK GF_PLF_2
111128 +#define RET_CHECK NULL_TREE
111129 +#define CANNOT_FIND_ARG 32
111130 +#define WRONG_NODE 32
111131 +#define NOT_INTENTIONAL_ASM NULL
111132 +#define MIN_CHECK true
111133 +#define MAX_CHECK false
111134 +
111135 +#define TURN_OFF_ASM_STR "# size_overflow MARK_TURN_OFF\n\t"
111136 +#define YES_ASM_STR "# size_overflow MARK_YES\n\t"
111137 +#define OK_ASM_STR "# size_overflow\n\t"
111138 +
111139 +#if BUILDING_GCC_VERSION == 4005
111140 +#define DECL_CHAIN(NODE) (TREE_CHAIN(DECL_MINIMAL_CHECK(NODE)))
111141 +#endif
111142 +
111143 +struct size_overflow_hash {
111144 + const struct size_overflow_hash * const next;
111145 + const char * const name;
111146 + const unsigned int param;
111147 +};
111148 +
111149 +#include "size_overflow_hash.h"
111150 +
111151 +enum mark {
111152 + MARK_NO, MARK_YES, MARK_NOT_INTENTIONAL, MARK_TURN_OFF
111153 +};
111154 +
111155 +static unsigned int call_count;
111156 +
111157 +struct visited {
111158 + struct visited *next;
111159 + const_tree fndecl;
111160 + unsigned int num;
111161 + const_gimple first_stmt;
111162 +};
111163 +
111164 +struct next_cgraph_node {
111165 + struct next_cgraph_node *next;
111166 + struct cgraph_node *current_function;
111167 + tree callee_fndecl;
111168 + unsigned int num;
111169 +};
111170 +
111171 +struct interesting_node {
111172 + struct interesting_node *next;
111173 + gimple first_stmt;
111174 + const_tree fndecl;
111175 + tree node;
111176 +#if BUILDING_GCC_VERSION <= 4007
111177 + VEC(tree, gc) *last_nodes;
111178 +#else
111179 + vec<tree, va_gc> *last_nodes;
111180 +#endif
111181 + unsigned int num;
111182 + enum mark intentional_attr_decl;
111183 + enum mark intentional_attr_cur_fndecl;
111184 + gimple intentional_mark_from_gimple;
111185 +};
111186 +
111187 +int plugin_is_GPL_compatible;
111188 +void debug_gimple_stmt(gimple gs);
111189 +
111190 +static tree report_size_overflow_decl;
111191 +static const_tree const_char_ptr_type_node;
111192 +
111193 +static tree expand(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs);
111194 +static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs);
111195 +static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs);
111196 +static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs);
111197 +static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs);
111198 +
111199 +static void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before);
111200 +static tree get_size_overflow_type(gimple stmt, const_tree node);
111201 +static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3);
111202 +
111203 +static struct plugin_info size_overflow_plugin_info = {
111204 + .version = "20131214beta",
111205 + .help = "no-size-overflow\tturn off size overflow checking\n",
111206 +};
111207 +
111208 +static tree handle_size_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
111209 +{
111210 + unsigned int arg_count;
111211 + enum tree_code code = TREE_CODE(*node);
111212 +
111213 + switch (code) {
111214 + case FUNCTION_DECL:
111215 + arg_count = type_num_arguments(TREE_TYPE(*node));
111216 + break;
111217 + case FUNCTION_TYPE:
111218 + case METHOD_TYPE:
111219 + arg_count = type_num_arguments(*node);
111220 + break;
111221 + default:
111222 + *no_add_attrs = true;
111223 + error("%s: %qE attribute only applies to functions", __func__, name);
111224 + return NULL_TREE;
111225 + }
111226 +
111227 + for (; args; args = TREE_CHAIN(args)) {
111228 + tree position = TREE_VALUE(args);
111229 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_LOW(position) > arg_count ) {
111230 + error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
111231 + *no_add_attrs = true;
111232 + }
111233 + }
111234 + return NULL_TREE;
111235 +}
111236 +
111237 +static tree handle_intentional_overflow_attribute(tree *node, tree __unused name, tree args, int __unused flags, bool *no_add_attrs)
111238 +{
111239 + unsigned int arg_count;
111240 + enum tree_code code = TREE_CODE(*node);
111241 +
111242 + switch (code) {
111243 + case FUNCTION_DECL:
111244 + arg_count = type_num_arguments(TREE_TYPE(*node));
111245 + break;
111246 + case FUNCTION_TYPE:
111247 + case METHOD_TYPE:
111248 + arg_count = type_num_arguments(*node);
111249 + break;
111250 + case FIELD_DECL:
111251 + return NULL_TREE;
111252 + default:
111253 + *no_add_attrs = true;
111254 + error("%qE attribute only applies to functions", name);
111255 + return NULL_TREE;
111256 + }
111257 +
111258 + if (TREE_INT_CST_HIGH(TREE_VALUE(args)) != 0)
111259 + return NULL_TREE;
111260 +
111261 + for (; args; args = TREE_CHAIN(args)) {
111262 + tree position = TREE_VALUE(args);
111263 + if (TREE_CODE(position) != INTEGER_CST || TREE_INT_CST_LOW(position) > arg_count ) {
111264 + error("%s: parameter %u is outside range.", __func__, (unsigned int)TREE_INT_CST_LOW(position));
111265 + *no_add_attrs = true;
111266 + }
111267 + }
111268 + return NULL_TREE;
111269 +}
111270 +
111271 +static struct attribute_spec size_overflow_attr = {
111272 + .name = "size_overflow",
111273 + .min_length = 1,
111274 + .max_length = -1,
111275 + .decl_required = true,
111276 + .type_required = false,
111277 + .function_type_required = false,
111278 + .handler = handle_size_overflow_attribute,
111279 +#if BUILDING_GCC_VERSION >= 4007
111280 + .affects_type_identity = false
111281 +#endif
111282 +};
111283 +
111284 +static struct attribute_spec intentional_overflow_attr = {
111285 + .name = "intentional_overflow",
111286 + .min_length = 1,
111287 + .max_length = -1,
111288 + .decl_required = true,
111289 + .type_required = false,
111290 + .function_type_required = false,
111291 + .handler = handle_intentional_overflow_attribute,
111292 +#if BUILDING_GCC_VERSION >= 4007
111293 + .affects_type_identity = false
111294 +#endif
111295 +};
111296 +
111297 +static void register_attributes(void __unused *event_data, void __unused *data)
111298 +{
111299 + register_attribute(&size_overflow_attr);
111300 + register_attribute(&intentional_overflow_attr);
111301 +}
111302 +
111303 +static bool is_bool(const_tree node)
111304 +{
111305 + const_tree type;
111306 +
111307 + if (node == NULL_TREE)
111308 + return false;
111309 +
111310 + type = TREE_TYPE(node);
111311 + if (!INTEGRAL_TYPE_P(type))
111312 + return false;
111313 + if (TREE_CODE(type) == BOOLEAN_TYPE)
111314 + return true;
111315 + if (TYPE_PRECISION(type) == 1)
111316 + return true;
111317 + return false;
111318 +}
111319 +
111320 +static bool skip_types(const_tree var)
111321 +{
111322 + tree type;
111323 + enum tree_code code;
111324 +
111325 + if (is_gimple_constant(var))
111326 + return true;
111327 +
111328 + switch (TREE_CODE(var)) {
111329 + case ADDR_EXPR:
111330 +#if BUILDING_GCC_VERSION >= 4006
111331 + case MEM_REF:
111332 +#endif
111333 + case ARRAY_REF:
111334 + case BIT_FIELD_REF:
111335 + case INDIRECT_REF:
111336 + case TARGET_MEM_REF:
111337 + case COMPONENT_REF:
111338 + case VAR_DECL:
111339 + case VIEW_CONVERT_EXPR:
111340 + return true;
111341 + default:
111342 + break;
111343 + }
111344 +
111345 + code = TREE_CODE(var);
111346 + gcc_assert(code == SSA_NAME || code == PARM_DECL);
111347 +
111348 + type = TREE_TYPE(var);
111349 + switch (TREE_CODE(type)) {
111350 + case INTEGER_TYPE:
111351 + case ENUMERAL_TYPE:
111352 + return false;
111353 + case BOOLEAN_TYPE:
111354 + return is_bool(var);
111355 + default:
111356 + return true;
111357 + }
111358 +}
111359 +
111360 +static inline gimple get_def_stmt(const_tree node)
111361 +{
111362 + gcc_assert(node != NULL_TREE);
111363 +
111364 + if (skip_types(node))
111365 + return NULL;
111366 +
111367 + if (TREE_CODE(node) != SSA_NAME)
111368 + return NULL;
111369 + return SSA_NAME_DEF_STMT(node);
111370 +}
111371 +
111372 +static unsigned char get_tree_code(const_tree type)
111373 +{
111374 + switch (TREE_CODE(type)) {
111375 + case ARRAY_TYPE:
111376 + return 0;
111377 + case BOOLEAN_TYPE:
111378 + return 1;
111379 + case ENUMERAL_TYPE:
111380 + return 2;
111381 + case FUNCTION_TYPE:
111382 + return 3;
111383 + case INTEGER_TYPE:
111384 + return 4;
111385 + case POINTER_TYPE:
111386 + return 5;
111387 + case RECORD_TYPE:
111388 + return 6;
111389 + case UNION_TYPE:
111390 + return 7;
111391 + case VOID_TYPE:
111392 + return 8;
111393 + case REAL_TYPE:
111394 + return 9;
111395 + case VECTOR_TYPE:
111396 + return 10;
111397 + case REFERENCE_TYPE:
111398 + return 11;
111399 + case OFFSET_TYPE:
111400 + return 12;
111401 + case COMPLEX_TYPE:
111402 + return 13;
111403 + default:
111404 + debug_tree((tree)type);
111405 + gcc_unreachable();
111406 + }
111407 +}
111408 +
111409 +struct function_hash {
111410 + size_t tree_codes_len;
111411 + unsigned char tree_codes[CODES_LIMIT];
111412 + tree fndecl;
111413 + unsigned int hash;
111414 +};
111415 +
111416 +// http://www.team5150.com/~andrew/noncryptohashzoo2~/CrapWow.html
111417 +static unsigned int CrapWow(const char *key, unsigned int len, unsigned int seed)
111418 +{
111419 +#define cwfold( a, b, lo, hi ) { p = (unsigned int)(a) * (unsigned long long)(b); lo ^= (unsigned int)p; hi ^= (unsigned int)(p >> 32); }
111420 +#define cwmixa( in ) { cwfold( in, m, k, h ); }
111421 +#define cwmixb( in ) { cwfold( in, n, h, k ); }
111422 +
111423 + unsigned int m = 0x57559429;
111424 + unsigned int n = 0x5052acdb;
111425 + const unsigned int *key4 = (const unsigned int *)key;
111426 + unsigned int h = len;
111427 + unsigned int k = len + seed + n;
111428 + unsigned long long p;
111429 +
111430 + while (len >= 8) {
111431 + cwmixb(key4[0]) cwmixa(key4[1]) key4 += 2;
111432 + len -= 8;
111433 + }
111434 + if (len >= 4) {
111435 + cwmixb(key4[0]) key4 += 1;
111436 + len -= 4;
111437 + }
111438 + if (len)
111439 + cwmixa(key4[0] & ((1 << (len * 8)) - 1 ));
111440 + cwmixb(h ^ (k + n));
111441 + return k ^ h;
111442 +
111443 +#undef cwfold
111444 +#undef cwmixa
111445 +#undef cwmixb
111446 +}
111447 +
111448 +static void set_hash(const char *fn_name, struct function_hash *fn_hash_data)
111449 +{
111450 + unsigned int fn, codes, seed = 0;
111451 +
111452 + fn = CrapWow(fn_name, strlen(fn_name), seed) & 0xffff;
111453 + codes = CrapWow((const char*)fn_hash_data->tree_codes, fn_hash_data->tree_codes_len, seed) & 0xffff;
111454 +
111455 + fn_hash_data->hash = fn ^ codes;
111456 +}
111457 +
111458 +static void set_node_codes(const_tree type, struct function_hash *fn_hash_data)
111459 +{
111460 + gcc_assert(type != NULL_TREE);
111461 + gcc_assert(TREE_CODE_CLASS(TREE_CODE(type)) == tcc_type);
111462 +
111463 + while (type && fn_hash_data->tree_codes_len < CODES_LIMIT) {
111464 + fn_hash_data->tree_codes[fn_hash_data->tree_codes_len] = get_tree_code(type);
111465 + fn_hash_data->tree_codes_len++;
111466 + type = TREE_TYPE(type);
111467 + }
111468 +}
111469 +
111470 +static void set_result_codes(const_tree node, struct function_hash *fn_hash_data)
111471 +{
111472 + const_tree result;
111473 +
111474 + gcc_assert(node != NULL_TREE);
111475 +
111476 + if (DECL_P(node)) {
111477 + result = DECL_RESULT(node);
111478 + if (result != NULL_TREE)
111479 + return set_node_codes(TREE_TYPE(result), fn_hash_data);
111480 + return set_result_codes(TREE_TYPE(node), fn_hash_data);
111481 + }
111482 +
111483 + gcc_assert(TYPE_P(node));
111484 +
111485 + if (TREE_CODE(node) == FUNCTION_TYPE)
111486 + return set_result_codes(TREE_TYPE(node), fn_hash_data);
111487 +
111488 + return set_node_codes(node, fn_hash_data);
111489 +}
111490 +
111491 +static void set_function_codes(struct function_hash *fn_hash_data)
111492 +{
111493 + const_tree arg, type = TREE_TYPE(fn_hash_data->fndecl);
111494 + enum tree_code code = TREE_CODE(type);
111495 +
111496 + gcc_assert(code == FUNCTION_TYPE || code == METHOD_TYPE);
111497 +
111498 + set_result_codes(fn_hash_data->fndecl, fn_hash_data);
111499 +
111500 + for (arg = TYPE_ARG_TYPES(type); arg != NULL_TREE && fn_hash_data->tree_codes_len < CODES_LIMIT; arg = TREE_CHAIN(arg))
111501 + set_node_codes(TREE_VALUE(arg), fn_hash_data);
111502 +}
111503 +
111504 +static const struct size_overflow_hash *get_function_hash(tree fndecl)
111505 +{
111506 + const struct size_overflow_hash *entry;
111507 + struct function_hash fn_hash_data;
111508 + const char *func_name;
111509 +
111510 + // skip builtins __builtin_constant_p
111511 + if (DECL_BUILT_IN(fndecl))
111512 + return NULL;
111513 +
111514 + fn_hash_data.fndecl = fndecl;
111515 + fn_hash_data.tree_codes_len = 0;
111516 +
111517 + set_function_codes(&fn_hash_data);
111518 + gcc_assert(fn_hash_data.tree_codes_len != 0);
111519 +
111520 + func_name = ASM_NAME(fn_hash_data.fndecl);
111521 + set_hash(func_name, &fn_hash_data);
111522 +
111523 + entry = size_overflow_hash[fn_hash_data.hash];
111524 +
111525 + while (entry) {
111526 + if (!strcmp(entry->name, func_name))
111527 + return entry;
111528 + entry = entry->next;
111529 + }
111530 + return NULL;
111531 +}
111532 +
111533 +static void print_missing_msg(tree func, unsigned int argnum)
111534 +{
111535 + location_t loc;
111536 + const char *curfunc;
111537 + struct function_hash fn_hash_data;
111538 +
111539 + fn_hash_data.fndecl = DECL_ORIGIN(func);
111540 + fn_hash_data.tree_codes_len = 0;
111541 +
111542 + loc = DECL_SOURCE_LOCATION(fn_hash_data.fndecl);
111543 + curfunc = ASM_NAME(fn_hash_data.fndecl);
111544 +
111545 + set_function_codes(&fn_hash_data);
111546 + set_hash(curfunc, &fn_hash_data);
111547 +
111548 + inform(loc, "Function %s is missing from the size_overflow hash table +%s+%u+%u+", curfunc, curfunc, argnum, fn_hash_data.hash);
111549 +}
111550 +
111551 +static unsigned int find_arg_number_tree(const_tree arg, const_tree func)
111552 +{
111553 + tree var;
111554 + unsigned int argnum = 1;
111555 +
111556 + if (TREE_CODE(arg) == SSA_NAME)
111557 + arg = SSA_NAME_VAR(arg);
111558 +
111559 + for (var = DECL_ARGUMENTS(func); var; var = TREE_CHAIN(var), argnum++) {
111560 + if (!operand_equal_p(arg, var, 0) && strcmp(NAME(var), NAME(arg)))
111561 + continue;
111562 + if (!skip_types(var))
111563 + return argnum;
111564 + }
111565 +
111566 + return CANNOT_FIND_ARG;
111567 +}
111568 +
111569 +static tree create_new_var(tree type)
111570 +{
111571 + tree new_var = create_tmp_var(type, "cicus");
111572 +
111573 +#if BUILDING_GCC_VERSION <= 4007
111574 + add_referenced_var(new_var);
111575 +#endif
111576 + return new_var;
111577 +}
111578 +
111579 +static gimple create_binary_assign(enum tree_code code, gimple stmt, tree rhs1, tree rhs2)
111580 +{
111581 + gimple assign;
111582 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
111583 + tree type = TREE_TYPE(rhs1);
111584 + tree lhs = create_new_var(type);
111585 +
111586 + gcc_assert(types_compatible_p(type, TREE_TYPE(rhs2)));
111587 + assign = gimple_build_assign_with_ops(code, lhs, rhs1, rhs2);
111588 + gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
111589 +
111590 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
111591 + update_stmt(assign);
111592 + gimple_set_plf(assign, MY_STMT, true);
111593 + return assign;
111594 +}
111595 +
111596 +static tree cast_a_tree(tree type, tree var)
111597 +{
111598 + gcc_assert(type != NULL_TREE);
111599 + gcc_assert(var != NULL_TREE);
111600 + gcc_assert(fold_convertible_p(type, var));
111601 +
111602 + return fold_convert(type, var);
111603 +}
111604 +
111605 +static tree get_lhs(const_gimple stmt)
111606 +{
111607 + switch (gimple_code(stmt)) {
111608 + case GIMPLE_ASSIGN:
111609 + case GIMPLE_CALL:
111610 + return gimple_get_lhs(stmt);
111611 + case GIMPLE_PHI:
111612 + return gimple_phi_result(stmt);
111613 + default:
111614 + return NULL_TREE;
111615 + }
111616 +}
111617 +
111618 +static bool skip_cast(tree dst_type, const_tree rhs, bool force)
111619 +{
111620 + const_gimple def_stmt = get_def_stmt(rhs);
111621 +
111622 + if (force)
111623 + return false;
111624 +
111625 + if (is_gimple_constant(rhs))
111626 + return false;
111627 +
111628 + if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
111629 + return false;
111630 +
111631 + if (!types_compatible_p(dst_type, TREE_TYPE(rhs)))
111632 + return false;
111633 +
111634 + // DI type can be on 32 bit (from create_assign) but overflow type stays DI
111635 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
111636 + return false;
111637 +
111638 + return true;
111639 +}
111640 +
111641 +static gimple build_cast_stmt(tree dst_type, tree rhs, tree lhs, gimple_stmt_iterator *gsi, bool before, bool force)
111642 +{
111643 + gimple assign, def_stmt;
111644 +
111645 + gcc_assert(dst_type != NULL_TREE && rhs != NULL_TREE);
111646 + if (gsi_end_p(*gsi) && before == AFTER_STMT)
111647 + gcc_unreachable();
111648 +
111649 + def_stmt = get_def_stmt(rhs);
111650 + if (def_stmt && gimple_code(def_stmt) != GIMPLE_NOP && skip_cast(dst_type, rhs, force) && gimple_plf(def_stmt, MY_STMT))
111651 + return def_stmt;
111652 +
111653 + if (lhs == CREATE_NEW_VAR)
111654 + lhs = create_new_var(dst_type);
111655 +
111656 + assign = gimple_build_assign(lhs, cast_a_tree(dst_type, rhs));
111657 +
111658 + if (!gsi_end_p(*gsi)) {
111659 + location_t loc = gimple_location(gsi_stmt(*gsi));
111660 + gimple_set_location(assign, loc);
111661 + }
111662 +
111663 + gimple_assign_set_lhs(assign, make_ssa_name(lhs, assign));
111664 +
111665 + if (before)
111666 + gsi_insert_before(gsi, assign, GSI_NEW_STMT);
111667 + else
111668 + gsi_insert_after(gsi, assign, GSI_NEW_STMT);
111669 + update_stmt(assign);
111670 + return assign;
111671 +}
111672 +
111673 +static tree cast_to_new_size_overflow_type(gimple stmt, tree rhs, tree size_overflow_type, bool before)
111674 +{
111675 + gimple_stmt_iterator gsi;
111676 + tree lhs;
111677 + gimple new_stmt;
111678 +
111679 + if (rhs == NULL_TREE)
111680 + return NULL_TREE;
111681 +
111682 + gsi = gsi_for_stmt(stmt);
111683 + new_stmt = build_cast_stmt(size_overflow_type, rhs, CREATE_NEW_VAR, &gsi, before, false);
111684 + gimple_set_plf(new_stmt, MY_STMT, true);
111685 +
111686 + lhs = get_lhs(new_stmt);
111687 + gcc_assert(lhs != NULL_TREE);
111688 + return lhs;
111689 +}
111690 +
111691 +static tree cast_to_TI_type(gimple stmt, tree node)
111692 +{
111693 + gimple_stmt_iterator gsi;
111694 + gimple cast_stmt;
111695 + tree type = TREE_TYPE(node);
111696 +
111697 + if (types_compatible_p(type, intTI_type_node))
111698 + return node;
111699 +
111700 + gsi = gsi_for_stmt(stmt);
111701 + cast_stmt = build_cast_stmt(intTI_type_node, node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
111702 + gimple_set_plf(cast_stmt, MY_STMT, true);
111703 + return gimple_assign_lhs(cast_stmt);
111704 +}
111705 +
111706 +static tree create_assign(struct pointer_set_t *visited, gimple oldstmt, tree rhs1, bool before)
111707 +{
111708 + tree lhs, new_lhs;
111709 + gimple_stmt_iterator gsi;
111710 +
111711 + if (rhs1 == NULL_TREE) {
111712 + debug_gimple_stmt(oldstmt);
111713 + error("%s: rhs1 is NULL_TREE", __func__);
111714 + gcc_unreachable();
111715 + }
111716 +
111717 + switch (gimple_code(oldstmt)) {
111718 + case GIMPLE_ASM:
111719 + lhs = rhs1;
111720 + break;
111721 + case GIMPLE_CALL:
111722 + case GIMPLE_ASSIGN:
111723 + lhs = gimple_get_lhs(oldstmt);
111724 + break;
111725 + default:
111726 + debug_gimple_stmt(oldstmt);
111727 + gcc_unreachable();
111728 + }
111729 +
111730 + gsi = gsi_for_stmt(oldstmt);
111731 + pointer_set_insert(visited, oldstmt);
111732 + if (lookup_stmt_eh_lp(oldstmt) != 0) {
111733 + basic_block next_bb, cur_bb;
111734 + const_edge e;
111735 +
111736 + gcc_assert(before == false);
111737 + gcc_assert(stmt_can_throw_internal(oldstmt));
111738 + gcc_assert(gimple_code(oldstmt) == GIMPLE_CALL);
111739 + gcc_assert(!gsi_end_p(gsi));
111740 +
111741 + cur_bb = gimple_bb(oldstmt);
111742 + next_bb = cur_bb->next_bb;
111743 + e = find_edge(cur_bb, next_bb);
111744 + gcc_assert(e != NULL);
111745 + gcc_assert(e->flags & EDGE_FALLTHRU);
111746 +
111747 + gsi = gsi_after_labels(next_bb);
111748 + gcc_assert(!gsi_end_p(gsi));
111749 +
111750 + before = true;
111751 + oldstmt = gsi_stmt(gsi);
111752 + }
111753 +
111754 + new_lhs = cast_to_new_size_overflow_type(oldstmt, rhs1, get_size_overflow_type(oldstmt, lhs), before);
111755 + return new_lhs;
111756 +}
111757 +
111758 +static tree dup_assign(struct pointer_set_t *visited, gimple oldstmt, const_tree node, tree rhs1, tree rhs2, tree __unused rhs3)
111759 +{
111760 + gimple stmt;
111761 + gimple_stmt_iterator gsi;
111762 + tree size_overflow_type, new_var, lhs = gimple_assign_lhs(oldstmt);
111763 +
111764 + if (gimple_plf(oldstmt, MY_STMT))
111765 + return lhs;
111766 +
111767 + if (gimple_num_ops(oldstmt) != 4 && rhs1 == NULL_TREE) {
111768 + rhs1 = gimple_assign_rhs1(oldstmt);
111769 + rhs1 = create_assign(visited, oldstmt, rhs1, BEFORE_STMT);
111770 + }
111771 + if (gimple_num_ops(oldstmt) == 3 && rhs2 == NULL_TREE) {
111772 + rhs2 = gimple_assign_rhs2(oldstmt);
111773 + rhs2 = create_assign(visited, oldstmt, rhs2, BEFORE_STMT);
111774 + }
111775 +
111776 + stmt = gimple_copy(oldstmt);
111777 + gimple_set_location(stmt, gimple_location(oldstmt));
111778 + gimple_set_plf(stmt, MY_STMT, true);
111779 +
111780 + if (gimple_assign_rhs_code(oldstmt) == WIDEN_MULT_EXPR)
111781 + gimple_assign_set_rhs_code(stmt, MULT_EXPR);
111782 +
111783 + size_overflow_type = get_size_overflow_type(oldstmt, node);
111784 +
111785 + new_var = create_new_var(size_overflow_type);
111786 + new_var = make_ssa_name(new_var, stmt);
111787 + gimple_assign_set_lhs(stmt, new_var);
111788 +
111789 + if (rhs1 != NULL_TREE)
111790 + gimple_assign_set_rhs1(stmt, rhs1);
111791 +
111792 + if (rhs2 != NULL_TREE)
111793 + gimple_assign_set_rhs2(stmt, rhs2);
111794 +#if BUILDING_GCC_VERSION >= 4007
111795 + if (rhs3 != NULL_TREE)
111796 + gimple_assign_set_rhs3(stmt, rhs3);
111797 +#endif
111798 + gimple_set_vuse(stmt, gimple_vuse(oldstmt));
111799 + gimple_set_vdef(stmt, gimple_vdef(oldstmt));
111800 +
111801 + gsi = gsi_for_stmt(oldstmt);
111802 + gsi_insert_after(&gsi, stmt, GSI_SAME_STMT);
111803 + update_stmt(stmt);
111804 + pointer_set_insert(visited, oldstmt);
111805 + return gimple_assign_lhs(stmt);
111806 +}
111807 +
111808 +static tree cast_parm_decl(tree phi_ssa_name, tree arg, tree size_overflow_type, basic_block bb)
111809 +{
111810 + gimple assign;
111811 + gimple_stmt_iterator gsi;
111812 + basic_block first_bb;
111813 +
111814 + gcc_assert(SSA_NAME_IS_DEFAULT_DEF(arg));
111815 +
111816 + if (bb->index == 0) {
111817 + first_bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
111818 + gcc_assert(dom_info_available_p(CDI_DOMINATORS));
111819 + set_immediate_dominator(CDI_DOMINATORS, first_bb, ENTRY_BLOCK_PTR);
111820 + bb = first_bb;
111821 + }
111822 +
111823 + gsi = gsi_after_labels(bb);
111824 + assign = build_cast_stmt(size_overflow_type, arg, phi_ssa_name, &gsi, BEFORE_STMT, false);
111825 + gimple_set_plf(assign, MY_STMT, true);
111826 +
111827 + return gimple_assign_lhs(assign);
111828 +}
111829 +
111830 +static tree use_phi_ssa_name(tree ssa_name_var, tree new_arg)
111831 +{
111832 + gimple_stmt_iterator gsi;
111833 + gimple assign, def_stmt = get_def_stmt(new_arg);
111834 +
111835 + if (gimple_code(def_stmt) == GIMPLE_PHI) {
111836 + gsi = gsi_after_labels(gimple_bb(def_stmt));
111837 + assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, BEFORE_STMT, true);
111838 + } else {
111839 + gsi = gsi_for_stmt(def_stmt);
111840 + assign = build_cast_stmt(TREE_TYPE(new_arg), new_arg, ssa_name_var, &gsi, AFTER_STMT, true);
111841 + }
111842 +
111843 + gimple_set_plf(assign, MY_STMT, true);
111844 + return gimple_assign_lhs(assign);
111845 +}
111846 +
111847 +static tree cast_visited_phi_arg(tree ssa_name_var, tree arg, tree size_overflow_type)
111848 +{
111849 + basic_block bb;
111850 + gimple_stmt_iterator gsi;
111851 + const_gimple def_stmt;
111852 + gimple assign;
111853 +
111854 + def_stmt = get_def_stmt(arg);
111855 + bb = gimple_bb(def_stmt);
111856 + gcc_assert(bb->index != 0);
111857 + gsi = gsi_after_labels(bb);
111858 +
111859 + assign = build_cast_stmt(size_overflow_type, arg, ssa_name_var, &gsi, BEFORE_STMT, false);
111860 + gimple_set_plf(assign, MY_STMT, true);
111861 + return gimple_assign_lhs(assign);
111862 +}
111863 +
111864 +static tree create_new_phi_arg(tree ssa_name_var, tree new_arg, gimple oldstmt, unsigned int i)
111865 +{
111866 + tree size_overflow_type;
111867 + tree arg;
111868 + const_gimple def_stmt;
111869 +
111870 + if (new_arg != NULL_TREE && is_gimple_constant(new_arg))
111871 + return new_arg;
111872 +
111873 + arg = gimple_phi_arg_def(oldstmt, i);
111874 + def_stmt = get_def_stmt(arg);
111875 + gcc_assert(def_stmt != NULL);
111876 + size_overflow_type = get_size_overflow_type(oldstmt, arg);
111877 +
111878 + switch (gimple_code(def_stmt)) {
111879 + case GIMPLE_PHI:
111880 + return cast_visited_phi_arg(ssa_name_var, arg, size_overflow_type);
111881 + case GIMPLE_NOP: {
111882 + basic_block bb;
111883 +
111884 + bb = gimple_phi_arg_edge(oldstmt, i)->src;
111885 + return cast_parm_decl(ssa_name_var, arg, size_overflow_type, bb);
111886 + }
111887 + case GIMPLE_ASM: {
111888 + gimple_stmt_iterator gsi;
111889 + gimple assign, stmt = get_def_stmt(arg);
111890 +
111891 + gsi = gsi_for_stmt(stmt);
111892 + assign = build_cast_stmt(size_overflow_type, arg, ssa_name_var, &gsi, AFTER_STMT, false);
111893 + gimple_set_plf(assign, MY_STMT, true);
111894 + return gimple_assign_lhs(assign);
111895 + }
111896 + default:
111897 + gcc_assert(new_arg != NULL_TREE);
111898 + gcc_assert(types_compatible_p(TREE_TYPE(new_arg), size_overflow_type));
111899 + return use_phi_ssa_name(ssa_name_var, new_arg);
111900 + }
111901 +}
111902 +
111903 +static gimple overflow_create_phi_node(gimple oldstmt, tree result)
111904 +{
111905 + basic_block bb;
111906 + gimple phi;
111907 + gimple_seq seq;
111908 + gimple_stmt_iterator gsi = gsi_for_stmt(oldstmt);
111909 +
111910 + bb = gsi_bb(gsi);
111911 +
111912 + if (result == NULL_TREE) {
111913 + tree old_result = gimple_phi_result(oldstmt);
111914 + tree size_overflow_type = get_size_overflow_type(oldstmt, old_result);
111915 +
111916 + result = create_new_var(size_overflow_type);
111917 + }
111918 +
111919 + phi = create_phi_node(result, bb);
111920 + gimple_phi_set_result(phi, make_ssa_name(result, phi));
111921 + seq = phi_nodes(bb);
111922 + gsi = gsi_last(seq);
111923 + gsi_remove(&gsi, false);
111924 +
111925 + gsi = gsi_for_stmt(oldstmt);
111926 + gsi_insert_after(&gsi, phi, GSI_NEW_STMT);
111927 + gimple_set_bb(phi, bb);
111928 + gimple_set_plf(phi, MY_STMT, true);
111929 + return phi;
111930 +}
111931 +
111932 +#if BUILDING_GCC_VERSION <= 4007
111933 +static tree create_new_phi_node(VEC(tree, gc) *args, tree ssa_name_var, gimple oldstmt)
111934 +#else
111935 +static tree create_new_phi_node(vec<tree, va_gc> *args, tree ssa_name_var, gimple oldstmt)
111936 +#endif
111937 +{
111938 + gimple new_phi;
111939 + unsigned int i;
111940 + tree arg, result;
111941 + location_t loc = gimple_location(oldstmt);
111942 +
111943 +#if BUILDING_GCC_VERSION <= 4007
111944 + gcc_assert(!VEC_empty(tree, args));
111945 +#else
111946 + gcc_assert(!args->is_empty());
111947 +#endif
111948 +
111949 + new_phi = overflow_create_phi_node(oldstmt, ssa_name_var);
111950 + result = gimple_phi_result(new_phi);
111951 + ssa_name_var = SSA_NAME_VAR(result);
111952 +
111953 +
111954 +#if BUILDING_GCC_VERSION == 4005
111955 + for (i = 0; i < VEC_length(tree, args); i++) {
111956 + arg = VEC_index(tree, args, i);
111957 +#elif BUILDING_GCC_VERSION <= 4007
111958 + FOR_EACH_VEC_ELT(tree, args, i, arg) {
111959 +#else
111960 + FOR_EACH_VEC_ELT(*args, i, arg) {
111961 +#endif
111962 + arg = create_new_phi_arg(ssa_name_var, arg, oldstmt, i);
111963 + add_phi_arg(new_phi, arg, gimple_phi_arg_edge(oldstmt, i), loc);
111964 + }
111965 +
111966 +#if BUILDING_GCC_VERSION <= 4007
111967 + VEC_free(tree, gc, args);
111968 +#else
111969 + vec_free(args);
111970 +#endif
111971 + update_stmt(new_phi);
111972 + return result;
111973 +}
111974 +
111975 +static tree handle_phi(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree orig_result)
111976 +{
111977 + tree ssa_name_var = NULL_TREE;
111978 +#if BUILDING_GCC_VERSION <= 4007
111979 + VEC(tree, gc) *args;
111980 +#else
111981 + vec<tree, va_gc> *args;
111982 +#endif
111983 + gimple oldstmt = get_def_stmt(orig_result);
111984 + unsigned int i, len = gimple_phi_num_args(oldstmt);
111985 +
111986 + pointer_set_insert(visited, oldstmt);
111987 +#if BUILDING_GCC_VERSION <= 4007
111988 + args = VEC_alloc(tree, gc, len);
111989 +#else
111990 + vec_alloc(args, len);
111991 +#endif
111992 + for (i = 0; i < len; i++) {
111993 + tree arg, new_arg;
111994 +
111995 + arg = gimple_phi_arg_def(oldstmt, i);
111996 + new_arg = expand(visited, caller_node, arg);
111997 +
111998 + if (ssa_name_var == NULL_TREE && new_arg != NULL_TREE)
111999 + ssa_name_var = SSA_NAME_VAR(new_arg);
112000 +
112001 + if (is_gimple_constant(arg)) {
112002 + tree size_overflow_type = get_size_overflow_type(oldstmt, arg);
112003 +
112004 + new_arg = cast_a_tree(size_overflow_type, arg);
112005 + }
112006 +
112007 +#if BUILDING_GCC_VERSION <= 4007
112008 + VEC_safe_push(tree, gc, args, new_arg);
112009 +#else
112010 + vec_safe_push(args, new_arg);
112011 +#endif
112012 + }
112013 +
112014 + return create_new_phi_node(args, ssa_name_var, oldstmt);
112015 +}
112016 +
112017 +static tree change_assign_rhs(gimple stmt, const_tree orig_rhs, tree new_rhs)
112018 +{
112019 + gimple assign;
112020 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
112021 + tree origtype = TREE_TYPE(orig_rhs);
112022 +
112023 + gcc_assert(is_gimple_assign(stmt));
112024 +
112025 + assign = build_cast_stmt(origtype, new_rhs, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
112026 + gimple_set_plf(assign, MY_STMT, true);
112027 + return gimple_assign_lhs(assign);
112028 +}
112029 +
112030 +static bool is_a_cast_and_const_overflow(const_tree no_const_rhs)
112031 +{
112032 + const_tree rhs1, lhs, rhs1_type, lhs_type;
112033 + enum machine_mode lhs_mode, rhs_mode;
112034 + gimple def_stmt = get_def_stmt(no_const_rhs);
112035 +
112036 + if (!def_stmt || !gimple_assign_cast_p(def_stmt))
112037 + return false;
112038 +
112039 + rhs1 = gimple_assign_rhs1(def_stmt);
112040 + lhs = gimple_assign_lhs(def_stmt);
112041 + rhs1_type = TREE_TYPE(rhs1);
112042 + lhs_type = TREE_TYPE(lhs);
112043 + rhs_mode = TYPE_MODE(rhs1_type);
112044 + lhs_mode = TYPE_MODE(lhs_type);
112045 + if (TYPE_UNSIGNED(lhs_type) == TYPE_UNSIGNED(rhs1_type) || lhs_mode != rhs_mode)
112046 + return false;
112047 +
112048 + return true;
112049 +}
112050 +
112051 +static tree create_cast_assign(struct pointer_set_t *visited, gimple stmt)
112052 +{
112053 + tree rhs1 = gimple_assign_rhs1(stmt);
112054 + tree lhs = gimple_assign_lhs(stmt);
112055 + const_tree rhs1_type = TREE_TYPE(rhs1);
112056 + const_tree lhs_type = TREE_TYPE(lhs);
112057 +
112058 + if (TYPE_UNSIGNED(rhs1_type) == TYPE_UNSIGNED(lhs_type))
112059 + return create_assign(visited, stmt, lhs, AFTER_STMT);
112060 +
112061 + return create_assign(visited, stmt, rhs1, AFTER_STMT);
112062 +}
112063 +
112064 +static bool no_uses(tree node)
112065 +{
112066 + imm_use_iterator imm_iter;
112067 + use_operand_p use_p;
112068 +
112069 + FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
112070 + const_gimple use_stmt = USE_STMT(use_p);
112071 +
112072 + if (use_stmt == NULL)
112073 + return true;
112074 + if (is_gimple_debug(use_stmt))
112075 + continue;
112076 + return false;
112077 + }
112078 + return true;
112079 +}
112080 +
112081 +// 3.8.5 mm/page-writeback.c __ilog2_u64(): ret, uint + uintmax; uint -> int; int max
112082 +static bool is_const_plus_unsigned_signed_truncation(const_tree lhs)
112083 +{
112084 + tree rhs1, lhs_type, rhs_type, rhs2, not_const_rhs;
112085 + gimple def_stmt = get_def_stmt(lhs);
112086 +
112087 + if (!def_stmt || !gimple_assign_cast_p(def_stmt))
112088 + return false;
112089 +
112090 + rhs1 = gimple_assign_rhs1(def_stmt);
112091 + rhs_type = TREE_TYPE(rhs1);
112092 + lhs_type = TREE_TYPE(lhs);
112093 + if (TYPE_UNSIGNED(lhs_type) || !TYPE_UNSIGNED(rhs_type))
112094 + return false;
112095 + if (TYPE_MODE(lhs_type) != TYPE_MODE(rhs_type))
112096 + return false;
112097 +
112098 + def_stmt = get_def_stmt(rhs1);
112099 + if (!def_stmt || !is_gimple_assign(def_stmt) || gimple_num_ops(def_stmt) != 3)
112100 + return false;
112101 +
112102 + if (gimple_assign_rhs_code(def_stmt) != PLUS_EXPR)
112103 + return false;
112104 +
112105 + rhs1 = gimple_assign_rhs1(def_stmt);
112106 + rhs2 = gimple_assign_rhs2(def_stmt);
112107 + if (!is_gimple_constant(rhs1) && !is_gimple_constant(rhs2))
112108 + return false;
112109 +
112110 + if (is_gimple_constant(rhs2))
112111 + not_const_rhs = rhs1;
112112 + else
112113 + not_const_rhs = rhs2;
112114 +
112115 + return no_uses(not_const_rhs);
112116 +}
112117 +
112118 +static bool skip_lhs_cast_check(const_gimple stmt)
112119 +{
112120 + const_tree rhs = gimple_assign_rhs1(stmt);
112121 + const_gimple def_stmt = get_def_stmt(rhs);
112122 +
112123 + // 3.8.2 kernel/futex_compat.c compat_exit_robust_list(): get_user() 64 ulong -> int (compat_long_t), int max
112124 + if (gimple_code(def_stmt) == GIMPLE_ASM)
112125 + return true;
112126 +
112127 + if (is_const_plus_unsigned_signed_truncation(rhs))
112128 + return true;
112129 +
112130 + return false;
112131 +}
112132 +
112133 +static tree create_cast_overflow_check(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree new_rhs1, gimple stmt)
112134 +{
112135 + bool cast_lhs, cast_rhs;
112136 + tree lhs = gimple_assign_lhs(stmt);
112137 + tree rhs = gimple_assign_rhs1(stmt);
112138 + const_tree lhs_type = TREE_TYPE(lhs);
112139 + const_tree rhs_type = TREE_TYPE(rhs);
112140 + enum machine_mode lhs_mode = TYPE_MODE(lhs_type);
112141 + enum machine_mode rhs_mode = TYPE_MODE(rhs_type);
112142 + unsigned int lhs_size = GET_MODE_BITSIZE(lhs_mode);
112143 + unsigned int rhs_size = GET_MODE_BITSIZE(rhs_mode);
112144 +
112145 + static bool check_lhs[3][4] = {
112146 + // ss su us uu
112147 + { false, true, true, false }, // lhs > rhs
112148 + { false, false, false, false }, // lhs = rhs
112149 + { true, true, true, true }, // lhs < rhs
112150 + };
112151 +
112152 + static bool check_rhs[3][4] = {
112153 + // ss su us uu
112154 + { true, false, true, true }, // lhs > rhs
112155 + { true, false, true, true }, // lhs = rhs
112156 + { true, false, true, true }, // lhs < rhs
112157 + };
112158 +
112159 + // skip lhs check on signed SI -> HI cast or signed SI -> QI cast !!!!
112160 + if (rhs_mode == SImode && !TYPE_UNSIGNED(rhs_type) && (lhs_mode == HImode || lhs_mode == QImode))
112161 + return create_assign(visited, stmt, lhs, AFTER_STMT);
112162 +
112163 + if (lhs_size > rhs_size) {
112164 + cast_lhs = check_lhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
112165 + cast_rhs = check_rhs[0][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
112166 + } else if (lhs_size == rhs_size) {
112167 + cast_lhs = check_lhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
112168 + cast_rhs = check_rhs[1][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
112169 + } else {
112170 + cast_lhs = check_lhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
112171 + cast_rhs = check_rhs[2][TYPE_UNSIGNED(rhs_type) + 2 * TYPE_UNSIGNED(lhs_type)];
112172 + }
112173 +
112174 + if (!cast_lhs && !cast_rhs)
112175 + return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
112176 +
112177 + if (cast_lhs && !skip_lhs_cast_check(stmt))
112178 + check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, lhs, BEFORE_STMT);
112179 +
112180 + if (cast_rhs)
112181 + check_size_overflow(caller_node, stmt, TREE_TYPE(new_rhs1), new_rhs1, rhs, BEFORE_STMT);
112182 +
112183 + return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
112184 +}
112185 +
112186 +static tree handle_unary_rhs(struct pointer_set_t *visited, struct cgraph_node *caller_node, gimple stmt)
112187 +{
112188 + tree rhs1, new_rhs1, lhs = gimple_assign_lhs(stmt);
112189 +
112190 + if (gimple_plf(stmt, MY_STMT))
112191 + return lhs;
112192 +
112193 + rhs1 = gimple_assign_rhs1(stmt);
112194 + if (TREE_CODE(TREE_TYPE(rhs1)) == POINTER_TYPE)
112195 + return create_assign(visited, stmt, lhs, AFTER_STMT);
112196 +
112197 + new_rhs1 = expand(visited, caller_node, rhs1);
112198 +
112199 + if (new_rhs1 == NULL_TREE)
112200 + return create_cast_assign(visited, stmt);
112201 +
112202 + if (gimple_plf(stmt, NO_CAST_CHECK))
112203 + return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
112204 +
112205 + if (gimple_assign_rhs_code(stmt) == BIT_NOT_EXPR) {
112206 + tree size_overflow_type = get_size_overflow_type(stmt, rhs1);
112207 +
112208 + new_rhs1 = cast_to_new_size_overflow_type(stmt, new_rhs1, size_overflow_type, BEFORE_STMT);
112209 + check_size_overflow(caller_node, stmt, size_overflow_type, new_rhs1, rhs1, BEFORE_STMT);
112210 + return create_assign(visited, stmt, lhs, AFTER_STMT);
112211 + }
112212 +
112213 + if (!gimple_assign_cast_p(stmt))
112214 + return dup_assign(visited, stmt, lhs, new_rhs1, NULL_TREE, NULL_TREE);
112215 +
112216 + return create_cast_overflow_check(visited, caller_node, new_rhs1, stmt);
112217 +}
112218 +
112219 +static tree handle_unary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, gimple stmt)
112220 +{
112221 + tree rhs1, lhs = gimple_assign_lhs(stmt);
112222 + gimple def_stmt = get_def_stmt(lhs);
112223 +
112224 + gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP);
112225 + rhs1 = gimple_assign_rhs1(def_stmt);
112226 +
112227 + if (is_gimple_constant(rhs1))
112228 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
112229 +
112230 + switch (TREE_CODE(rhs1)) {
112231 + case SSA_NAME:
112232 + return handle_unary_rhs(visited, caller_node, def_stmt);
112233 + case ARRAY_REF:
112234 + case BIT_FIELD_REF:
112235 + case ADDR_EXPR:
112236 + case COMPONENT_REF:
112237 + case INDIRECT_REF:
112238 +#if BUILDING_GCC_VERSION >= 4006
112239 + case MEM_REF:
112240 +#endif
112241 + case TARGET_MEM_REF:
112242 + case VIEW_CONVERT_EXPR:
112243 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
112244 + case PARM_DECL:
112245 + case VAR_DECL:
112246 + return create_assign(visited, stmt, lhs, AFTER_STMT);
112247 +
112248 + default:
112249 + debug_gimple_stmt(def_stmt);
112250 + debug_tree(rhs1);
112251 + gcc_unreachable();
112252 + }
112253 +}
112254 +
112255 +static void insert_cond(basic_block cond_bb, tree arg, enum tree_code cond_code, tree type_value)
112256 +{
112257 + gimple cond_stmt;
112258 + gimple_stmt_iterator gsi = gsi_last_bb(cond_bb);
112259 +
112260 + cond_stmt = gimple_build_cond(cond_code, arg, type_value, NULL_TREE, NULL_TREE);
112261 + gsi_insert_after(&gsi, cond_stmt, GSI_CONTINUE_LINKING);
112262 + update_stmt(cond_stmt);
112263 +}
112264 +
112265 +static tree create_string_param(tree string)
112266 +{
112267 + tree i_type, a_type;
112268 + const int length = TREE_STRING_LENGTH(string);
112269 +
112270 + gcc_assert(length > 0);
112271 +
112272 + i_type = build_index_type(build_int_cst(NULL_TREE, length - 1));
112273 + a_type = build_array_type(char_type_node, i_type);
112274 +
112275 + TREE_TYPE(string) = a_type;
112276 + TREE_CONSTANT(string) = 1;
112277 + TREE_READONLY(string) = 1;
112278 +
112279 + return build1(ADDR_EXPR, ptr_type_node, string);
112280 +}
112281 +
112282 +#if BUILDING_GCC_VERSION <= 4006
112283 +struct cgraph_node *cgraph_get_create_node(tree decl);
112284 +
112285 +struct cgraph_node *cgraph_get_create_node(tree decl)
112286 +{
112287 + struct cgraph_node *node;
112288 +
112289 + node = cgraph_get_node(decl);
112290 + if (node)
112291 + return node;
112292 + return cgraph_node(decl);
112293 +}
112294 +#endif
112295 +
112296 +static void insert_cond_result(struct cgraph_node *caller_node, basic_block bb_true, const_gimple stmt, const_tree arg, bool min)
112297 +{
112298 + gimple func_stmt;
112299 + const_gimple def_stmt;
112300 + const_tree loc_line;
112301 + tree loc_file, ssa_name, current_func;
112302 + expanded_location xloc;
112303 + char *ssa_name_buf;
112304 + int len;
112305 + struct cgraph_edge *edge;
112306 + struct cgraph_node *callee_node;
112307 + int frequency;
112308 + gimple_stmt_iterator gsi = gsi_start_bb(bb_true);
112309 +
112310 + def_stmt = get_def_stmt(arg);
112311 + xloc = expand_location(gimple_location(def_stmt));
112312 +
112313 + if (!gimple_has_location(def_stmt)) {
112314 + xloc = expand_location(gimple_location(stmt));
112315 + if (!gimple_has_location(stmt))
112316 + xloc = expand_location(DECL_SOURCE_LOCATION(current_function_decl));
112317 + }
112318 +
112319 + loc_line = build_int_cstu(unsigned_type_node, xloc.line);
112320 +
112321 + loc_file = build_string(strlen(xloc.file) + 1, xloc.file);
112322 + loc_file = create_string_param(loc_file);
112323 +
112324 + current_func = build_string(NAME_LEN(current_function_decl) + 1, NAME(current_function_decl));
112325 + current_func = create_string_param(current_func);
112326 +
112327 + gcc_assert(DECL_NAME(SSA_NAME_VAR(arg)) != NULL);
112328 + call_count++;
112329 + len = asprintf(&ssa_name_buf, "%s_%u %s, count: %u\n", NAME(SSA_NAME_VAR(arg)), SSA_NAME_VERSION(arg), min ? "min" : "max", call_count);
112330 + gcc_assert(len > 0);
112331 + ssa_name = build_string(len + 1, ssa_name_buf);
112332 + free(ssa_name_buf);
112333 + ssa_name = create_string_param(ssa_name);
112334 +
112335 + // void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
112336 + func_stmt = gimple_build_call(report_size_overflow_decl, 4, loc_file, loc_line, current_func, ssa_name);
112337 + gsi_insert_after(&gsi, func_stmt, GSI_CONTINUE_LINKING);
112338 +
112339 + callee_node = cgraph_get_create_node(report_size_overflow_decl);
112340 + frequency = compute_call_stmt_bb_frequency(current_function_decl, bb_true);
112341 +
112342 +#if BUILDING_GCC_VERSION <= 4006
112343 + edge = cgraph_create_edge(caller_node, callee_node, func_stmt, bb_true->count, frequency, bb_true->loop_depth);
112344 +#else
112345 + edge = cgraph_create_edge(caller_node, callee_node, func_stmt, bb_true->count, frequency);
112346 +#endif
112347 + gcc_assert(edge != NULL);
112348 +}
112349 +
112350 +static void __unused print_the_code_insertions(const_gimple stmt)
112351 +{
112352 + location_t loc = gimple_location(stmt);
112353 +
112354 + inform(loc, "Integer size_overflow check applied here.");
112355 +}
112356 +
112357 +static void insert_check_size_overflow(struct cgraph_node *caller_node, gimple stmt, enum tree_code cond_code, tree arg, tree type_value, bool before, bool min)
112358 +{
112359 + basic_block cond_bb, join_bb, bb_true;
112360 + edge e;
112361 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
112362 +
112363 + cond_bb = gimple_bb(stmt);
112364 + if (before)
112365 + gsi_prev(&gsi);
112366 + if (gsi_end_p(gsi))
112367 + e = split_block_after_labels(cond_bb);
112368 + else
112369 + e = split_block(cond_bb, gsi_stmt(gsi));
112370 + cond_bb = e->src;
112371 + join_bb = e->dest;
112372 + e->flags = EDGE_FALSE_VALUE;
112373 + e->probability = REG_BR_PROB_BASE;
112374 +
112375 + bb_true = create_empty_bb(cond_bb);
112376 + make_edge(cond_bb, bb_true, EDGE_TRUE_VALUE);
112377 + make_edge(cond_bb, join_bb, EDGE_FALSE_VALUE);
112378 + make_edge(bb_true, join_bb, EDGE_FALLTHRU);
112379 +
112380 + gcc_assert(dom_info_available_p(CDI_DOMINATORS));
112381 + set_immediate_dominator(CDI_DOMINATORS, bb_true, cond_bb);
112382 + set_immediate_dominator(CDI_DOMINATORS, join_bb, cond_bb);
112383 +
112384 + if (current_loops != NULL) {
112385 + gcc_assert(cond_bb->loop_father == join_bb->loop_father);
112386 + add_bb_to_loop(bb_true, cond_bb->loop_father);
112387 + }
112388 +
112389 + insert_cond(cond_bb, arg, cond_code, type_value);
112390 + insert_cond_result(caller_node, bb_true, stmt, arg, min);
112391 +
112392 +// print_the_code_insertions(stmt);
112393 +}
112394 +
112395 +static void check_size_overflow(struct cgraph_node *caller_node, gimple stmt, tree size_overflow_type, tree cast_rhs, tree rhs, bool before)
112396 +{
112397 + const_tree rhs_type = TREE_TYPE(rhs);
112398 + tree cast_rhs_type, type_max_type, type_min_type, type_max, type_min;
112399 +
112400 + gcc_assert(rhs_type != NULL_TREE);
112401 + if (TREE_CODE(rhs_type) == POINTER_TYPE)
112402 + return;
112403 +
112404 + gcc_assert(TREE_CODE(rhs_type) == INTEGER_TYPE || TREE_CODE(rhs_type) == ENUMERAL_TYPE);
112405 +
112406 + if (is_const_plus_unsigned_signed_truncation(rhs))
112407 + return;
112408 +
112409 + type_max = cast_a_tree(size_overflow_type, TYPE_MAX_VALUE(rhs_type));
112410 + // typemax (-1) < typemin (0)
112411 + if (TREE_OVERFLOW(type_max))
112412 + return;
112413 +
112414 + type_min = cast_a_tree(size_overflow_type, TYPE_MIN_VALUE(rhs_type));
112415 +
112416 + cast_rhs_type = TREE_TYPE(cast_rhs);
112417 + type_max_type = TREE_TYPE(type_max);
112418 + gcc_assert(types_compatible_p(cast_rhs_type, type_max_type));
112419 +
112420 + insert_check_size_overflow(caller_node, stmt, GT_EXPR, cast_rhs, type_max, before, MAX_CHECK);
112421 +
112422 + // special case: get_size_overflow_type(), 32, u64->s
112423 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode) && TYPE_UNSIGNED(size_overflow_type) && !TYPE_UNSIGNED(rhs_type))
112424 + return;
112425 +
112426 + type_min_type = TREE_TYPE(type_min);
112427 + gcc_assert(types_compatible_p(type_max_type, type_min_type));
112428 + insert_check_size_overflow(caller_node, stmt, LT_EXPR, cast_rhs, type_min, before, MIN_CHECK);
112429 +}
112430 +
112431 +static bool is_a_constant_overflow(const_gimple stmt, const_tree rhs)
112432 +{
112433 + if (gimple_assign_rhs_code(stmt) == MIN_EXPR)
112434 + return false;
112435 + if (!is_gimple_constant(rhs))
112436 + return false;
112437 + return true;
112438 +}
112439 +
112440 +static tree get_def_stmt_rhs(const_tree var)
112441 +{
112442 + tree rhs1, def_stmt_rhs1;
112443 + gimple rhs1_def_stmt, def_stmt_rhs1_def_stmt, def_stmt;
112444 +
112445 + def_stmt = get_def_stmt(var);
112446 + if (!gimple_assign_cast_p(def_stmt))
112447 + return NULL_TREE;
112448 + gcc_assert(gimple_code(def_stmt) != GIMPLE_NOP && gimple_plf(def_stmt, MY_STMT) && gimple_assign_cast_p(def_stmt));
112449 +
112450 + rhs1 = gimple_assign_rhs1(def_stmt);
112451 + rhs1_def_stmt = get_def_stmt(rhs1);
112452 + if (!gimple_assign_cast_p(rhs1_def_stmt))
112453 + return rhs1;
112454 +
112455 + def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
112456 + def_stmt_rhs1_def_stmt = get_def_stmt(def_stmt_rhs1);
112457 +
112458 + switch (gimple_code(def_stmt_rhs1_def_stmt)) {
112459 + case GIMPLE_CALL:
112460 + case GIMPLE_NOP:
112461 + case GIMPLE_ASM:
112462 + case GIMPLE_PHI:
112463 + return def_stmt_rhs1;
112464 + case GIMPLE_ASSIGN:
112465 + return rhs1;
112466 + default:
112467 + debug_gimple_stmt(def_stmt_rhs1_def_stmt);
112468 + gcc_unreachable();
112469 + }
112470 +}
112471 +
112472 +static tree handle_intentional_overflow(struct pointer_set_t *visited, struct cgraph_node *caller_node, bool check_overflow, gimple stmt, tree change_rhs, tree new_rhs2)
112473 +{
112474 + tree new_rhs, orig_rhs;
112475 + void (*gimple_assign_set_rhs)(gimple, tree);
112476 + tree rhs1 = gimple_assign_rhs1(stmt);
112477 + tree rhs2 = gimple_assign_rhs2(stmt);
112478 + tree lhs = gimple_assign_lhs(stmt);
112479 +
112480 + if (!check_overflow)
112481 + return create_assign(visited, stmt, lhs, AFTER_STMT);
112482 +
112483 + if (change_rhs == NULL_TREE)
112484 + return create_assign(visited, stmt, lhs, AFTER_STMT);
112485 +
112486 + if (new_rhs2 == NULL_TREE) {
112487 + orig_rhs = rhs1;
112488 + gimple_assign_set_rhs = &gimple_assign_set_rhs1;
112489 + } else {
112490 + orig_rhs = rhs2;
112491 + gimple_assign_set_rhs = &gimple_assign_set_rhs2;
112492 + }
112493 +
112494 + check_size_overflow(caller_node, stmt, TREE_TYPE(change_rhs), change_rhs, orig_rhs, BEFORE_STMT);
112495 +
112496 + new_rhs = change_assign_rhs(stmt, orig_rhs, change_rhs);
112497 + gimple_assign_set_rhs(stmt, new_rhs);
112498 + update_stmt(stmt);
112499 +
112500 + return create_assign(visited, stmt, lhs, AFTER_STMT);
112501 +}
112502 +
112503 +static bool is_subtraction_special(const_gimple stmt)
112504 +{
112505 + gimple rhs1_def_stmt, rhs2_def_stmt;
112506 + const_tree rhs1_def_stmt_rhs1, rhs2_def_stmt_rhs1, rhs1_def_stmt_lhs, rhs2_def_stmt_lhs;
112507 + enum machine_mode rhs1_def_stmt_rhs1_mode, rhs2_def_stmt_rhs1_mode, rhs1_def_stmt_lhs_mode, rhs2_def_stmt_lhs_mode;
112508 + const_tree rhs1 = gimple_assign_rhs1(stmt);
112509 + const_tree rhs2 = gimple_assign_rhs2(stmt);
112510 +
112511 + if (is_gimple_constant(rhs1) || is_gimple_constant(rhs2))
112512 + return false;
112513 +
112514 + gcc_assert(TREE_CODE(rhs1) == SSA_NAME && TREE_CODE(rhs2) == SSA_NAME);
112515 +
112516 + if (gimple_assign_rhs_code(stmt) != MINUS_EXPR)
112517 + return false;
112518 +
112519 + rhs1_def_stmt = get_def_stmt(rhs1);
112520 + rhs2_def_stmt = get_def_stmt(rhs2);
112521 + if (!gimple_assign_cast_p(rhs1_def_stmt) || !gimple_assign_cast_p(rhs2_def_stmt))
112522 + return false;
112523 +
112524 + rhs1_def_stmt_rhs1 = gimple_assign_rhs1(rhs1_def_stmt);
112525 + rhs2_def_stmt_rhs1 = gimple_assign_rhs1(rhs2_def_stmt);
112526 + rhs1_def_stmt_lhs = gimple_assign_lhs(rhs1_def_stmt);
112527 + rhs2_def_stmt_lhs = gimple_assign_lhs(rhs2_def_stmt);
112528 + rhs1_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_rhs1));
112529 + rhs2_def_stmt_rhs1_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_rhs1));
112530 + rhs1_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs1_def_stmt_lhs));
112531 + rhs2_def_stmt_lhs_mode = TYPE_MODE(TREE_TYPE(rhs2_def_stmt_lhs));
112532 + if (GET_MODE_BITSIZE(rhs1_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs1_def_stmt_lhs_mode))
112533 + return false;
112534 + if (GET_MODE_BITSIZE(rhs2_def_stmt_rhs1_mode) <= GET_MODE_BITSIZE(rhs2_def_stmt_lhs_mode))
112535 + return false;
112536 +
112537 + gimple_set_plf(rhs1_def_stmt, NO_CAST_CHECK, true);
112538 + gimple_set_plf(rhs2_def_stmt, NO_CAST_CHECK, true);
112539 + return true;
112540 +}
112541 +
112542 +static tree handle_integer_truncation(struct pointer_set_t *visited, struct cgraph_node *caller_node, const_tree lhs)
112543 +{
112544 + tree new_rhs1, new_rhs2;
112545 + tree new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1, new_lhs;
112546 + gimple assign, stmt = get_def_stmt(lhs);
112547 + tree rhs1 = gimple_assign_rhs1(stmt);
112548 + tree rhs2 = gimple_assign_rhs2(stmt);
112549 +
112550 + if (!is_subtraction_special(stmt))
112551 + return NULL_TREE;
112552 +
112553 + new_rhs1 = expand(visited, caller_node, rhs1);
112554 + new_rhs2 = expand(visited, caller_node, rhs2);
112555 +
112556 + new_rhs1_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs1);
112557 + new_rhs2_def_stmt_rhs1 = get_def_stmt_rhs(new_rhs2);
112558 +
112559 + if (new_rhs1_def_stmt_rhs1 == NULL_TREE || new_rhs2_def_stmt_rhs1 == NULL_TREE)
112560 + return NULL_TREE;
112561 +
112562 + if (!types_compatible_p(TREE_TYPE(new_rhs1_def_stmt_rhs1), TREE_TYPE(new_rhs2_def_stmt_rhs1))) {
112563 + new_rhs1_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs1_def_stmt_rhs1);
112564 + new_rhs2_def_stmt_rhs1 = cast_to_TI_type(stmt, new_rhs2_def_stmt_rhs1);
112565 + }
112566 +
112567 + assign = create_binary_assign(MINUS_EXPR, stmt, new_rhs1_def_stmt_rhs1, new_rhs2_def_stmt_rhs1);
112568 + new_lhs = gimple_assign_lhs(assign);
112569 + check_size_overflow(caller_node, assign, TREE_TYPE(new_lhs), new_lhs, rhs1, AFTER_STMT);
112570 +
112571 + return dup_assign(visited, stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
112572 +}
112573 +
112574 +static bool is_a_neg_overflow(const_gimple stmt, const_tree rhs)
112575 +{
112576 + const_gimple def_stmt;
112577 +
112578 + if (TREE_CODE(rhs) != SSA_NAME)
112579 + return false;
112580 +
112581 + if (gimple_assign_rhs_code(stmt) != PLUS_EXPR)
112582 + return false;
112583 +
112584 + def_stmt = get_def_stmt(rhs);
112585 + if (!is_gimple_assign(def_stmt) || gimple_assign_rhs_code(def_stmt) != BIT_NOT_EXPR)
112586 + return false;
112587 +
112588 + return true;
112589 +}
112590 +
112591 +static tree handle_binary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
112592 +{
112593 + tree rhs1, rhs2, new_lhs;
112594 + gimple def_stmt = get_def_stmt(lhs);
112595 + tree new_rhs1 = NULL_TREE;
112596 + tree new_rhs2 = NULL_TREE;
112597 +
112598 + rhs1 = gimple_assign_rhs1(def_stmt);
112599 + rhs2 = gimple_assign_rhs2(def_stmt);
112600 +
112601 + /* no DImode/TImode division in the 32/64 bit kernel */
112602 + switch (gimple_assign_rhs_code(def_stmt)) {
112603 + case RDIV_EXPR:
112604 + case TRUNC_DIV_EXPR:
112605 + case CEIL_DIV_EXPR:
112606 + case FLOOR_DIV_EXPR:
112607 + case ROUND_DIV_EXPR:
112608 + case TRUNC_MOD_EXPR:
112609 + case CEIL_MOD_EXPR:
112610 + case FLOOR_MOD_EXPR:
112611 + case ROUND_MOD_EXPR:
112612 + case EXACT_DIV_EXPR:
112613 + case POINTER_PLUS_EXPR:
112614 + case BIT_AND_EXPR:
112615 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
112616 + default:
112617 + break;
112618 + }
112619 +
112620 + new_lhs = handle_integer_truncation(visited, caller_node, lhs);
112621 + if (new_lhs != NULL_TREE)
112622 + return new_lhs;
112623 +
112624 + if (TREE_CODE(rhs1) == SSA_NAME)
112625 + new_rhs1 = expand(visited, caller_node, rhs1);
112626 + if (TREE_CODE(rhs2) == SSA_NAME)
112627 + new_rhs2 = expand(visited, caller_node, rhs2);
112628 +
112629 + if (is_a_neg_overflow(def_stmt, rhs2))
112630 + return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs1, NULL_TREE);
112631 + if (is_a_neg_overflow(def_stmt, rhs1))
112632 + return handle_intentional_overflow(visited, caller_node, true, def_stmt, new_rhs2, new_rhs2);
112633 +
112634 +
112635 + if (is_a_constant_overflow(def_stmt, rhs2))
112636 + return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs1), def_stmt, new_rhs1, NULL_TREE);
112637 + if (is_a_constant_overflow(def_stmt, rhs1))
112638 + return handle_intentional_overflow(visited, caller_node, !is_a_cast_and_const_overflow(rhs2), def_stmt, new_rhs2, new_rhs2);
112639 +
112640 + return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, NULL_TREE);
112641 +}
112642 +
112643 +#if BUILDING_GCC_VERSION >= 4007
112644 +static tree get_new_rhs(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree size_overflow_type, tree rhs)
112645 +{
112646 + if (is_gimple_constant(rhs))
112647 + return cast_a_tree(size_overflow_type, rhs);
112648 + if (TREE_CODE(rhs) != SSA_NAME)
112649 + return NULL_TREE;
112650 + return expand(visited, caller_node, rhs);
112651 +}
112652 +
112653 +static tree handle_ternary_ops(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
112654 +{
112655 + tree rhs1, rhs2, rhs3, new_rhs1, new_rhs2, new_rhs3, size_overflow_type;
112656 + gimple def_stmt = get_def_stmt(lhs);
112657 +
112658 + size_overflow_type = get_size_overflow_type(def_stmt, lhs);
112659 +
112660 + rhs1 = gimple_assign_rhs1(def_stmt);
112661 + rhs2 = gimple_assign_rhs2(def_stmt);
112662 + rhs3 = gimple_assign_rhs3(def_stmt);
112663 + new_rhs1 = get_new_rhs(visited, caller_node, size_overflow_type, rhs1);
112664 + new_rhs2 = get_new_rhs(visited, caller_node, size_overflow_type, rhs2);
112665 + new_rhs3 = get_new_rhs(visited, caller_node, size_overflow_type, rhs3);
112666 +
112667 + return dup_assign(visited, def_stmt, lhs, new_rhs1, new_rhs2, new_rhs3);
112668 +}
112669 +#endif
112670 +
112671 +static tree get_size_overflow_type(gimple stmt, const_tree node)
112672 +{
112673 + const_tree type;
112674 + tree new_type;
112675 +
112676 + gcc_assert(node != NULL_TREE);
112677 +
112678 + type = TREE_TYPE(node);
112679 +
112680 + if (gimple_plf(stmt, MY_STMT))
112681 + return TREE_TYPE(node);
112682 +
112683 + switch (TYPE_MODE(type)) {
112684 + case QImode:
112685 + new_type = intHI_type_node;
112686 + break;
112687 + case HImode:
112688 + new_type = intSI_type_node;
112689 + break;
112690 + case SImode:
112691 + new_type = intDI_type_node;
112692 + break;
112693 + case DImode:
112694 + if (LONG_TYPE_SIZE == GET_MODE_BITSIZE(SImode))
112695 + new_type = TYPE_UNSIGNED(type) ? unsigned_intDI_type_node : intDI_type_node;
112696 + else
112697 + new_type = intTI_type_node;
112698 + break;
112699 + case TImode:
112700 + gcc_assert(!TYPE_UNSIGNED(type));
112701 + new_type = intTI_type_node;
112702 + break;
112703 + default:
112704 + debug_tree((tree)node);
112705 + error("%s: unsupported gcc configuration (%qE).", __func__, current_function_decl);
112706 + gcc_unreachable();
112707 + }
112708 +
112709 + if (TYPE_QUALS(type) != 0)
112710 + return build_qualified_type(new_type, TYPE_QUALS(type));
112711 + return new_type;
112712 +}
112713 +
112714 +static tree expand_visited(gimple def_stmt)
112715 +{
112716 + const_gimple next_stmt;
112717 + gimple_stmt_iterator gsi;
112718 + enum gimple_code code = gimple_code(def_stmt);
112719 +
112720 + if (code == GIMPLE_ASM)
112721 + return NULL_TREE;
112722 +
112723 + gsi = gsi_for_stmt(def_stmt);
112724 + gsi_next(&gsi);
112725 +
112726 + if (gimple_code(def_stmt) == GIMPLE_PHI && gsi_end_p(gsi))
112727 + return NULL_TREE;
112728 + gcc_assert(!gsi_end_p(gsi));
112729 + next_stmt = gsi_stmt(gsi);
112730 +
112731 + if (gimple_code(def_stmt) == GIMPLE_PHI && !gimple_plf((gimple)next_stmt, MY_STMT))
112732 + return NULL_TREE;
112733 + gcc_assert(gimple_plf((gimple)next_stmt, MY_STMT));
112734 +
112735 + return get_lhs(next_stmt);
112736 +}
112737 +
112738 +static tree expand(struct pointer_set_t *visited, struct cgraph_node *caller_node, tree lhs)
112739 +{
112740 + gimple def_stmt;
112741 +
112742 + def_stmt = get_def_stmt(lhs);
112743 +
112744 + if (!def_stmt || gimple_code(def_stmt) == GIMPLE_NOP)
112745 + return NULL_TREE;
112746 +
112747 + if (gimple_plf(def_stmt, MY_STMT))
112748 + return lhs;
112749 +
112750 + if (pointer_set_contains(visited, def_stmt))
112751 + return expand_visited(def_stmt);
112752 +
112753 + switch (gimple_code(def_stmt)) {
112754 + case GIMPLE_PHI:
112755 + return handle_phi(visited, caller_node, lhs);
112756 + case GIMPLE_CALL:
112757 + case GIMPLE_ASM:
112758 + return create_assign(visited, def_stmt, lhs, AFTER_STMT);
112759 + case GIMPLE_ASSIGN:
112760 + switch (gimple_num_ops(def_stmt)) {
112761 + case 2:
112762 + return handle_unary_ops(visited, caller_node, def_stmt);
112763 + case 3:
112764 + return handle_binary_ops(visited, caller_node, lhs);
112765 +#if BUILDING_GCC_VERSION >= 4007
112766 + case 4:
112767 + return handle_ternary_ops(visited, caller_node, lhs);
112768 +#endif
112769 + }
112770 + default:
112771 + debug_gimple_stmt(def_stmt);
112772 + error("%s: unknown gimple code", __func__);
112773 + gcc_unreachable();
112774 + }
112775 +}
112776 +
112777 +static tree cast_to_orig_type(gimple stmt, const_tree orig_node, tree new_node)
112778 +{
112779 + const_gimple assign;
112780 + tree orig_type = TREE_TYPE(orig_node);
112781 + gimple_stmt_iterator gsi = gsi_for_stmt(stmt);
112782 +
112783 + assign = build_cast_stmt(orig_type, new_node, CREATE_NEW_VAR, &gsi, BEFORE_STMT, false);
112784 + return gimple_assign_lhs(assign);
112785 +}
112786 +
112787 +static void change_orig_node(struct interesting_node *cur_node, tree new_node)
112788 +{
112789 + void (*set_rhs)(gimple, tree);
112790 + gimple stmt = cur_node->first_stmt;
112791 + const_tree orig_node = cur_node->node;
112792 +
112793 + switch (gimple_code(stmt)) {
112794 + case GIMPLE_RETURN:
112795 + gimple_return_set_retval(stmt, cast_to_orig_type(stmt, orig_node, new_node));
112796 + break;
112797 + case GIMPLE_CALL:
112798 + gimple_call_set_arg(stmt, cur_node->num - 1, cast_to_orig_type(stmt, orig_node, new_node));
112799 + break;
112800 + case GIMPLE_ASSIGN:
112801 + switch (cur_node->num) {
112802 + case 1:
112803 + set_rhs = &gimple_assign_set_rhs1;
112804 + break;
112805 + case 2:
112806 + set_rhs = &gimple_assign_set_rhs2;
112807 + break;
112808 +#if BUILDING_GCC_VERSION > 4005
112809 + case 3:
112810 + set_rhs = &gimple_assign_set_rhs3;
112811 + break;
112812 +#endif
112813 + default:
112814 + gcc_unreachable();
112815 + }
112816 +
112817 + set_rhs(stmt, cast_to_orig_type(stmt, orig_node, new_node));
112818 + break;
112819 + default:
112820 + debug_gimple_stmt(stmt);
112821 + gcc_unreachable();
112822 + }
112823 +
112824 + update_stmt(stmt);
112825 +}
112826 +
112827 +static unsigned int get_correct_arg_count(unsigned int argnum, tree fndecl)
112828 +{
112829 + const struct size_overflow_hash *hash;
112830 + unsigned int new_argnum;
112831 + tree arg;
112832 + const_tree origarg;
112833 +
112834 + if (argnum == 0)
112835 + return argnum;
112836 +
112837 + hash = get_function_hash(fndecl);
112838 + if (hash && hash->param & (1U << argnum))
112839 + return argnum;
112840 +
112841 + if (DECL_EXTERNAL(fndecl))
112842 + return argnum;
112843 +
112844 + origarg = DECL_ARGUMENTS(DECL_ORIGIN(fndecl));
112845 + argnum--;
112846 + while (origarg && argnum) {
112847 + origarg = TREE_CHAIN(origarg);
112848 + argnum--;
112849 + }
112850 + gcc_assert(argnum == 0);
112851 + gcc_assert(origarg != NULL_TREE);
112852 +
112853 + for (arg = DECL_ARGUMENTS(fndecl), new_argnum = 1; arg; arg = TREE_CHAIN(arg), new_argnum++)
112854 + if (operand_equal_p(origarg, arg, 0) || !strcmp(NAME(origarg), NAME(arg)))
112855 + return new_argnum;
112856 +
112857 + return CANNOT_FIND_ARG;
112858 +}
112859 +
112860 +// Don't want to duplicate entries in next_cgraph_node
112861 +static bool is_in_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, const_tree fndecl, unsigned int num)
112862 +{
112863 + const_tree new_callee_fndecl;
112864 + struct next_cgraph_node *cur_node;
112865 +
112866 + if (fndecl == RET_CHECK)
112867 +#if BUILDING_GCC_VERSION <= 4007
112868 + new_callee_fndecl = node->decl;
112869 +#else
112870 + new_callee_fndecl = node->symbol.decl;
112871 +#endif
112872 + else
112873 + new_callee_fndecl = fndecl;
112874 +
112875 + for (cur_node = head; cur_node; cur_node = cur_node->next) {
112876 +#if BUILDING_GCC_VERSION <= 4007
112877 + if (!operand_equal_p(cur_node->current_function->decl, node->decl, 0))
112878 +#else
112879 + if (!operand_equal_p(cur_node->current_function->symbol.decl, node->symbol.decl, 0))
112880 +#endif
112881 + continue;
112882 + if (!operand_equal_p(cur_node->callee_fndecl, new_callee_fndecl, 0))
112883 + continue;
112884 + if (num == cur_node->num)
112885 + return true;
112886 + }
112887 + return false;
112888 +}
112889 +
112890 +/* Add a next_cgraph_node into the list for handle_function().
112891 + * handle_function() iterates over all the next cgraph nodes and
112892 + * starts the overflow check insertion process.
112893 + */
112894 +static struct next_cgraph_node *create_new_next_cgraph_node(struct next_cgraph_node *head, struct cgraph_node *node, tree fndecl, unsigned int num)
112895 +{
112896 + struct next_cgraph_node *new_node;
112897 +
112898 + if (is_in_next_cgraph_node(head, node, fndecl, num))
112899 + return head;
112900 +
112901 + new_node = (struct next_cgraph_node *)xmalloc(sizeof(*new_node));
112902 + new_node->current_function = node;
112903 + new_node->next = NULL;
112904 + new_node->num = num;
112905 + if (fndecl == RET_CHECK)
112906 +#if BUILDING_GCC_VERSION <= 4007
112907 + new_node->callee_fndecl = node->decl;
112908 +#else
112909 + new_node->callee_fndecl = node->symbol.decl;
112910 +#endif
112911 + else
112912 + new_node->callee_fndecl = fndecl;
112913 +
112914 + if (!head)
112915 + return new_node;
112916 +
112917 + new_node->next = head;
112918 + return new_node;
112919 +}
112920 +
112921 +static struct next_cgraph_node *create_new_next_cgraph_nodes(struct next_cgraph_node *head, struct cgraph_node *node, unsigned int num)
112922 +{
112923 + struct cgraph_edge *e;
112924 +
112925 + if (num == 0)
112926 + return create_new_next_cgraph_node(head, node, RET_CHECK, num);
112927 +
112928 + for (e = node->callers; e; e = e->next_caller) {
112929 + tree fndecl = gimple_call_fndecl(e->call_stmt);
112930 +
112931 + gcc_assert(fndecl != NULL_TREE);
112932 + head = create_new_next_cgraph_node(head, e->caller, fndecl, num);
112933 + }
112934 +
112935 + return head;
112936 +}
112937 +
112938 +static bool is_a_return_check(const_tree node)
112939 +{
112940 + if (TREE_CODE(node) == FUNCTION_DECL)
112941 + return true;
112942 +
112943 + gcc_assert(TREE_CODE(node) == PARM_DECL);
112944 + return false;
112945 +}
112946 +
112947 +static bool is_in_hash_table(tree fndecl, unsigned int num)
112948 +{
112949 + const struct size_overflow_hash *hash;
112950 +
112951 + hash = get_function_hash(fndecl);
112952 + if (hash && (hash->param & (1U << num)))
112953 + return true;
112954 + return false;
112955 +}
112956 +
112957 +struct missing_functions {
112958 + struct missing_functions *next;
112959 + const_tree node;
112960 + tree fndecl;
112961 +};
112962 +
112963 +static struct missing_functions *create_new_missing_function(struct missing_functions *missing_fn_head, tree node)
112964 +{
112965 + struct missing_functions *new_function;
112966 +
112967 + new_function = (struct missing_functions *)xmalloc(sizeof(*new_function));
112968 + new_function->node = node;
112969 + new_function->next = NULL;
112970 +
112971 + if (TREE_CODE(node) == FUNCTION_DECL)
112972 + new_function->fndecl = node;
112973 + else
112974 + new_function->fndecl = current_function_decl;
112975 + gcc_assert(new_function->fndecl);
112976 +
112977 + if (!missing_fn_head)
112978 + return new_function;
112979 +
112980 + new_function->next = missing_fn_head;
112981 + return new_function;
112982 +}
112983 +
112984 +/* Check if the function has a size_overflow attribute or it is in the size_overflow hash table.
112985 + * If the function is missing everywhere then print the missing message into stderr.
112986 + */
112987 +static bool is_missing_function(tree orig_fndecl, unsigned int num)
112988 +{
112989 + switch (DECL_FUNCTION_CODE(orig_fndecl)) {
112990 +#if BUILDING_GCC_VERSION >= 4008
112991 + case BUILT_IN_BSWAP16:
112992 +#endif
112993 + case BUILT_IN_BSWAP32:
112994 + case BUILT_IN_BSWAP64:
112995 + case BUILT_IN_EXPECT:
112996 + case BUILT_IN_MEMCMP:
112997 + return false;
112998 + default:
112999 + break;
113000 + }
113001 +
113002 + // skip test.c
113003 + if (strcmp(NAME(current_function_decl), "coolmalloc")) {
113004 + if (lookup_attribute("size_overflow", DECL_ATTRIBUTES(orig_fndecl)))
113005 + warning(0, "unnecessary size_overflow attribute on: %s\n", NAME(orig_fndecl));
113006 + }
113007 +
113008 + if (is_in_hash_table(orig_fndecl, num))
113009 + return false;
113010 +
113011 + print_missing_msg(orig_fndecl, num);
113012 + return true;
113013 +}
113014 +
113015 +// Get the argnum of a function decl, if node is a return then the argnum is 0
113016 +static unsigned int get_function_num(const_tree node, const_tree orig_fndecl)
113017 +{
113018 + if (is_a_return_check(node))
113019 + return 0;
113020 + else
113021 + return find_arg_number_tree(node, orig_fndecl);
113022 +}
113023 +
113024 +/* If the function is missing from the hash table and it is a static function
113025 + * then create a next_cgraph_node from it for handle_function()
113026 + */
113027 +static struct next_cgraph_node *check_missing_overflow_attribute_and_create_next_node(struct next_cgraph_node *cnodes, struct missing_functions *missing_fn_head)
113028 +{
113029 + unsigned int num;
113030 + tree orig_fndecl;
113031 + struct cgraph_node *next_node = NULL;
113032 +
113033 + orig_fndecl = DECL_ORIGIN(missing_fn_head->fndecl);
113034 +
113035 + num = get_function_num(missing_fn_head->node, orig_fndecl);
113036 + if (num == CANNOT_FIND_ARG)
113037 + return cnodes;
113038 +
113039 + if (!is_missing_function(orig_fndecl, num))
113040 + return cnodes;
113041 +
113042 + next_node = cgraph_get_node(missing_fn_head->fndecl);
113043 + if (next_node && next_node->local.local)
113044 + cnodes = create_new_next_cgraph_nodes(cnodes, next_node, num);
113045 + return cnodes;
113046 +}
113047 +
113048 +/* Search for missing size_overflow attributes on the last nodes in ipa and collect them
113049 + * into the next_cgraph_node list. They will be the next interesting returns or callees.
113050 + */
113051 +static struct next_cgraph_node *search_overflow_attribute(struct next_cgraph_node *cnodes, struct interesting_node *cur_node)
113052 +{
113053 + unsigned int i;
113054 + tree node;
113055 + struct missing_functions *cur, *missing_fn_head = NULL;
113056 +
113057 +#if BUILDING_GCC_VERSION == 4005
113058 + for (i = 0; i < VEC_length(tree, cur_node->last_nodes); i++) {
113059 + node = VEC_index(tree, cur_node->last_nodes, i);
113060 +#elif BUILDING_GCC_VERSION <= 4007
113061 + FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, node) {
113062 +#else
113063 + FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, node) {
113064 +#endif
113065 + switch (TREE_CODE(node)) {
113066 + case PARM_DECL:
113067 + if (TREE_CODE(TREE_TYPE(node)) != INTEGER_TYPE)
113068 + break;
113069 + case FUNCTION_DECL:
113070 + missing_fn_head = create_new_missing_function(missing_fn_head, node);
113071 + break;
113072 + default:
113073 + break;
113074 + }
113075 + }
113076 +
113077 + while (missing_fn_head) {
113078 + cnodes = check_missing_overflow_attribute_and_create_next_node(cnodes, missing_fn_head);
113079 +
113080 + cur = missing_fn_head->next;
113081 + free(missing_fn_head);
113082 + missing_fn_head = cur;
113083 + }
113084 +
113085 + return cnodes;
113086 +}
113087 +
113088 +static void walk_phi_set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree result)
113089 +{
113090 + gimple phi = get_def_stmt(result);
113091 + unsigned int i, n = gimple_phi_num_args(phi);
113092 +
113093 + pointer_set_insert(visited, phi);
113094 + for (i = 0; i < n; i++) {
113095 + const_tree arg = gimple_phi_arg_def(phi, i);
113096 +
113097 + set_conditions(visited, interesting_conditions, arg);
113098 + }
113099 +}
113100 +
113101 +enum conditions {
113102 + FROM_CONST, NOT_UNARY, CAST
113103 +};
113104 +
113105 +// Search for constants, cast assignments and binary/ternary assignments
113106 +static void set_conditions(struct pointer_set_t *visited, bool *interesting_conditions, const_tree lhs)
113107 +{
113108 + gimple def_stmt = get_def_stmt(lhs);
113109 +
113110 + if (is_gimple_constant(lhs)) {
113111 + interesting_conditions[FROM_CONST] = true;
113112 + return;
113113 + }
113114 +
113115 + if (!def_stmt)
113116 + return;
113117 +
113118 + if (pointer_set_contains(visited, def_stmt))
113119 + return;
113120 +
113121 + switch (gimple_code(def_stmt)) {
113122 + case GIMPLE_NOP:
113123 + case GIMPLE_CALL:
113124 + case GIMPLE_ASM:
113125 + return;
113126 + case GIMPLE_PHI:
113127 + return walk_phi_set_conditions(visited, interesting_conditions, lhs);
113128 + case GIMPLE_ASSIGN:
113129 + if (gimple_num_ops(def_stmt) == 2) {
113130 + const_tree rhs = gimple_assign_rhs1(def_stmt);
113131 +
113132 + if (gimple_assign_cast_p(def_stmt))
113133 + interesting_conditions[CAST] = true;
113134 +
113135 + return set_conditions(visited, interesting_conditions, rhs);
113136 + } else {
113137 + interesting_conditions[NOT_UNARY] = true;
113138 + return;
113139 + }
113140 + default:
113141 + debug_gimple_stmt(def_stmt);
113142 + gcc_unreachable();
113143 + }
113144 +}
113145 +
113146 +// determine whether duplication will be necessary or not.
113147 +static void search_interesting_conditions(struct interesting_node *cur_node, bool *interesting_conditions)
113148 +{
113149 + struct pointer_set_t *visited;
113150 +
113151 + if (gimple_assign_cast_p(cur_node->first_stmt))
113152 + interesting_conditions[CAST] = true;
113153 + else if (is_gimple_assign(cur_node->first_stmt) && gimple_num_ops(cur_node->first_stmt) > 2)
113154 + interesting_conditions[NOT_UNARY] = true;
113155 +
113156 + visited = pointer_set_create();
113157 + set_conditions(visited, interesting_conditions, cur_node->node);
113158 + pointer_set_destroy(visited);
113159 +}
113160 +
113161 +// Remove the size_overflow asm stmt and create an assignment from the input and output of the asm
113162 +static void replace_size_overflow_asm_with_assign(gimple asm_stmt, tree lhs, tree rhs)
113163 +{
113164 + gimple assign;
113165 + gimple_stmt_iterator gsi;
113166 +
113167 + // already removed
113168 + if (gimple_bb(asm_stmt) == NULL)
113169 + return;
113170 + gsi = gsi_for_stmt(asm_stmt);
113171 +
113172 + assign = gimple_build_assign(lhs, rhs);
113173 + gsi_insert_before(&gsi, assign, GSI_SAME_STMT);
113174 + SSA_NAME_DEF_STMT(lhs) = assign;
113175 +
113176 + gsi_remove(&gsi, true);
113177 +}
113178 +
113179 +// Get the field decl of a component ref for intentional_overflow checking
113180 +static const_tree search_field_decl(const_tree comp_ref)
113181 +{
113182 + const_tree field = NULL_TREE;
113183 + unsigned int i, len = TREE_OPERAND_LENGTH(comp_ref);
113184 +
113185 + for (i = 0; i < len; i++) {
113186 + field = TREE_OPERAND(comp_ref, i);
113187 + if (TREE_CODE(field) == FIELD_DECL)
113188 + break;
113189 + }
113190 + gcc_assert(TREE_CODE(field) == FIELD_DECL);
113191 + return field;
113192 +}
113193 +
113194 +/* Get the fndecl of an interesting stmt, the fndecl is the caller function if the interesting
113195 + * stmt is a return otherwise it is the callee function.
113196 + */
113197 +static const_tree get_interesting_orig_fndecl(const_gimple stmt, unsigned int argnum)
113198 +{
113199 + const_tree fndecl;
113200 +
113201 + if (argnum == 0)
113202 + fndecl = current_function_decl;
113203 + else
113204 + fndecl = gimple_call_fndecl(stmt);
113205 +
113206 + if (fndecl == NULL_TREE)
113207 + return NULL_TREE;
113208 +
113209 + return DECL_ORIGIN(fndecl);
113210 +}
113211 +
113212 +/* Get the param of the intentional_overflow attribute.
113213 + * * 0: MARK_NOT_INTENTIONAL
113214 + * * 1..MAX_PARAM: MARK_YES
113215 + * * -1: MARK_TURN_OFF
113216 + */
113217 +static tree get_attribute_param(const_tree decl)
113218 +{
113219 + const_tree attr;
113220 +
113221 + if (decl == NULL_TREE)
113222 + return NULL_TREE;
113223 +
113224 + attr = lookup_attribute("intentional_overflow", DECL_ATTRIBUTES(decl));
113225 + if (!attr || !TREE_VALUE(attr))
113226 + return NULL_TREE;
113227 +
113228 + return TREE_VALUE(attr);
113229 +}
113230 +
113231 +// MARK_TURN_OFF
113232 +static bool is_turn_off_intentional_attr(const_tree decl)
113233 +{
113234 + const_tree param_head;
113235 +
113236 + param_head = get_attribute_param(decl);
113237 + if (param_head == NULL_TREE)
113238 + return false;
113239 +
113240 + if (TREE_INT_CST_HIGH(TREE_VALUE(param_head)) == -1)
113241 + return true;
113242 + return false;
113243 +}
113244 +
113245 +// MARK_NOT_INTENTIONAL
113246 +static bool is_end_intentional_intentional_attr(const_tree decl, unsigned int argnum)
113247 +{
113248 + const_tree param_head;
113249 +
113250 + if (argnum == 0)
113251 + return false;
113252 +
113253 + param_head = get_attribute_param(decl);
113254 + if (param_head == NULL_TREE)
113255 + return false;
113256 +
113257 + if (!TREE_INT_CST_LOW(TREE_VALUE(param_head)))
113258 + return true;
113259 + return false;
113260 +}
113261 +
113262 +// MARK_YES
113263 +static bool is_yes_intentional_attr(const_tree decl, unsigned int argnum)
113264 +{
113265 + tree param, param_head;
113266 +
113267 + if (argnum == 0)
113268 + return false;
113269 +
113270 + param_head = get_attribute_param(decl);
113271 + for (param = param_head; param; param = TREE_CHAIN(param))
113272 + if (argnum == TREE_INT_CST_LOW(TREE_VALUE(param)))
113273 + return true;
113274 + return false;
113275 +}
113276 +
113277 +static const char *get_asm_string(const_gimple stmt)
113278 +{
113279 + if (!stmt)
113280 + return NULL;
113281 + if (gimple_code(stmt) != GIMPLE_ASM)
113282 + return NULL;
113283 +
113284 + return gimple_asm_string(stmt);
113285 +}
113286 +
113287 +static bool is_size_overflow_intentional_asm_turn_off(const_gimple stmt)
113288 +{
113289 + const char *str;
113290 +
113291 + str = get_asm_string(stmt);
113292 + if (!str)
113293 + return false;
113294 + return !strcmp(str, TURN_OFF_ASM_STR);
113295 +}
113296 +
113297 +static bool is_size_overflow_intentional_asm_yes(const_gimple stmt)
113298 +{
113299 + const char *str;
113300 +
113301 + str = get_asm_string(stmt);
113302 + if (!str)
113303 + return false;
113304 + return !strcmp(str, YES_ASM_STR);
113305 +}
113306 +
113307 +static bool is_size_overflow_asm(const_gimple stmt)
113308 +{
113309 + const char *str;
113310 +
113311 + str = get_asm_string(stmt);
113312 + if (!str)
113313 + return false;
113314 + return !strncmp(str, "# size_overflow", 15);
113315 +}
113316 +
113317 +static void print_missing_intentional(enum mark callee_attr, enum mark caller_attr, const_tree decl, unsigned int argnum)
113318 +{
113319 + location_t loc;
113320 +
113321 + if (caller_attr == MARK_NO || caller_attr == MARK_NOT_INTENTIONAL || caller_attr == MARK_TURN_OFF)
113322 + return;
113323 +
113324 + if (callee_attr == MARK_NOT_INTENTIONAL || callee_attr == MARK_YES)
113325 + return;
113326 +
113327 + loc = DECL_SOURCE_LOCATION(decl);
113328 + inform(loc, "The intentional_overflow attribute is missing from +%s+%u+", NAME(decl), argnum);
113329 +}
113330 +
113331 +/* Get the type of the intentional_overflow attribute of a node
113332 + * * MARK_TURN_OFF
113333 + * * MARK_YES
113334 + * * MARK_NO
113335 + * * MARK_NOT_INTENTIONAL
113336 + */
113337 +static enum mark get_intentional_attr_type(const_tree node)
113338 +{
113339 + const_tree cur_decl;
113340 +
113341 + if (node == NULL_TREE)
113342 + return MARK_NO;
113343 +
113344 + switch (TREE_CODE(node)) {
113345 + case COMPONENT_REF:
113346 + cur_decl = search_field_decl(node);
113347 + if (is_turn_off_intentional_attr(cur_decl))
113348 + return MARK_TURN_OFF;
113349 + if (is_end_intentional_intentional_attr(cur_decl, 1))
113350 + return MARK_YES;
113351 + break;
113352 + case PARM_DECL: {
113353 + unsigned int argnum;
113354 +
113355 + cur_decl = DECL_ORIGIN(current_function_decl);
113356 + argnum = find_arg_number_tree(node, cur_decl);
113357 + if (argnum == CANNOT_FIND_ARG)
113358 + return MARK_NO;
113359 + if (is_yes_intentional_attr(cur_decl, argnum))
113360 + return MARK_YES;
113361 + if (is_end_intentional_intentional_attr(cur_decl, argnum))
113362 + return MARK_NOT_INTENTIONAL;
113363 + break;
113364 + }
113365 + case FUNCTION_DECL:
113366 + if (is_turn_off_intentional_attr(DECL_ORIGIN(node)))
113367 + return MARK_TURN_OFF;
113368 + break;
113369 + default:
113370 + break;
113371 + }
113372 + return MARK_NO;
113373 +}
113374 +
113375 +// Search for the intentional_overflow attribute on the last nodes
113376 +static enum mark search_last_nodes_intentional(struct interesting_node *cur_node)
113377 +{
113378 + unsigned int i;
113379 + tree last_node;
113380 + enum mark mark = MARK_NO;
113381 +
113382 +#if BUILDING_GCC_VERSION == 4005
113383 + for (i = 0; i < VEC_length(tree, cur_node->last_nodes); i++) {
113384 + last_node = VEC_index(tree, cur_node->last_nodes, i);
113385 +#elif BUILDING_GCC_VERSION <= 4007
113386 + FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, last_node) {
113387 +#else
113388 + FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, last_node) {
113389 +#endif
113390 + mark = get_intentional_attr_type(last_node);
113391 + if (mark != MARK_NO)
113392 + break;
113393 + }
113394 + return mark;
113395 +}
113396 +
113397 +/* Check the intentional kind of size_overflow asm stmt (created by the gimple pass) and
113398 + * set the appropriate intentional_overflow type. Delete the asm stmt in the end.
113399 + */
113400 +static bool is_intentional_attribute_from_gimple(struct interesting_node *cur_node)
113401 +{
113402 + if (!cur_node->intentional_mark_from_gimple)
113403 + return false;
113404 +
113405 + if (is_size_overflow_intentional_asm_yes(cur_node->intentional_mark_from_gimple))
113406 + cur_node->intentional_attr_cur_fndecl = MARK_YES;
113407 + else
113408 + cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
113409 +
113410 + // skip param decls
113411 + if (gimple_asm_noutputs(cur_node->intentional_mark_from_gimple) == 0)
113412 + return true;
113413 + return true;
113414 +}
113415 +
113416 +/* Search intentional_overflow attribute on caller and on callee too.
113417 + * 0</MARK_YES: no dup, search size_overflow and intentional_overflow attributes
113418 + * 0/MARK_NOT_INTENTIONAL: no dup, search size_overflow attribute (int)
113419 + * -1/MARK_TURN_OFF: no dup, no search, current_function_decl -> no dup
113420 +*/
113421 +static void check_intentional_attribute_ipa(struct interesting_node *cur_node)
113422 +{
113423 + const_tree fndecl;
113424 +
113425 + if (is_intentional_attribute_from_gimple(cur_node))
113426 + return;
113427 +
113428 + if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
113429 + cur_node->intentional_attr_cur_fndecl = MARK_TURN_OFF;
113430 + return;
113431 + }
113432 +
113433 + if (gimple_code(cur_node->first_stmt) == GIMPLE_ASM) {
113434 + cur_node->intentional_attr_cur_fndecl = MARK_NOT_INTENTIONAL;
113435 + return;
113436 + }
113437 +
113438 + if (gimple_code(cur_node->first_stmt) == GIMPLE_ASSIGN)
113439 + return;
113440 +
113441 + fndecl = get_interesting_orig_fndecl(cur_node->first_stmt, cur_node->num);
113442 + if (is_turn_off_intentional_attr(fndecl)) {
113443 + cur_node->intentional_attr_decl = MARK_TURN_OFF;
113444 + return;
113445 + }
113446 +
113447 + if (is_end_intentional_intentional_attr(fndecl, cur_node->num))
113448 + cur_node->intentional_attr_decl = MARK_NOT_INTENTIONAL;
113449 + else if (is_yes_intentional_attr(fndecl, cur_node->num))
113450 + cur_node->intentional_attr_decl = MARK_YES;
113451 +
113452 + cur_node->intentional_attr_cur_fndecl = search_last_nodes_intentional(cur_node);
113453 + print_missing_intentional(cur_node->intentional_attr_decl, cur_node->intentional_attr_cur_fndecl, cur_node->fndecl, cur_node->num);
113454 +}
113455 +
113456 +// e.g., 3.8.2, 64, arch/x86/ia32/ia32_signal.c copy_siginfo_from_user32(): compat_ptr() u32 max
113457 +static bool skip_asm(const_tree arg)
113458 +{
113459 + gimple def_stmt = get_def_stmt(arg);
113460 +
113461 + if (!def_stmt || !gimple_assign_cast_p(def_stmt))
113462 + return false;
113463 +
113464 + def_stmt = get_def_stmt(gimple_assign_rhs1(def_stmt));
113465 + return def_stmt && gimple_code(def_stmt) == GIMPLE_ASM;
113466 +}
113467 +
113468 +static void walk_use_def_phi(struct pointer_set_t *visited, struct interesting_node *cur_node, tree result)
113469 +{
113470 + gimple phi = get_def_stmt(result);
113471 + unsigned int i, n = gimple_phi_num_args(phi);
113472 +
113473 + pointer_set_insert(visited, phi);
113474 + for (i = 0; i < n; i++) {
113475 + tree arg = gimple_phi_arg_def(phi, i);
113476 +
113477 + walk_use_def(visited, cur_node, arg);
113478 + }
113479 +}
113480 +
113481 +static void walk_use_def_binary(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
113482 +{
113483 + gimple def_stmt = get_def_stmt(lhs);
113484 + tree rhs1, rhs2;
113485 +
113486 + rhs1 = gimple_assign_rhs1(def_stmt);
113487 + rhs2 = gimple_assign_rhs2(def_stmt);
113488 +
113489 + walk_use_def(visited, cur_node, rhs1);
113490 + walk_use_def(visited, cur_node, rhs2);
113491 +}
113492 +
113493 +static void insert_last_node(struct interesting_node *cur_node, tree node)
113494 +{
113495 + unsigned int i;
113496 + tree element;
113497 + enum tree_code code;
113498 +
113499 + gcc_assert(node != NULL_TREE);
113500 +
113501 + if (is_gimple_constant(node))
113502 + return;
113503 +
113504 + code = TREE_CODE(node);
113505 + if (code == VAR_DECL) {
113506 + node = DECL_ORIGIN(node);
113507 + code = TREE_CODE(node);
113508 + }
113509 +
113510 + if (code != PARM_DECL && code != FUNCTION_DECL && code != COMPONENT_REF)
113511 + return;
113512 +
113513 +#if BUILDING_GCC_VERSION == 4005
113514 + for (i = 0; i < VEC_length(tree, cur_node->last_nodes); i++) {
113515 + element = VEC_index(tree, cur_node->last_nodes, i);
113516 +#elif BUILDING_GCC_VERSION <= 4007
113517 + FOR_EACH_VEC_ELT(tree, cur_node->last_nodes, i, element) {
113518 +#else
113519 + FOR_EACH_VEC_ELT(*cur_node->last_nodes, i, element) {
113520 +#endif
113521 + if (operand_equal_p(node, element, 0))
113522 + return;
113523 + }
113524 +
113525 +#if BUILDING_GCC_VERSION <= 4007
113526 + gcc_assert(VEC_length(tree, cur_node->last_nodes) < VEC_LEN);
113527 + VEC_safe_push(tree, gc, cur_node->last_nodes, node);
113528 +#else
113529 + gcc_assert(cur_node->last_nodes->length() < VEC_LEN);
113530 + vec_safe_push(cur_node->last_nodes, node);
113531 +#endif
113532 +}
113533 +
113534 +// a size_overflow asm stmt in the control flow doesn't stop the recursion
113535 +static void handle_asm_stmt(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs, const_gimple stmt)
113536 +{
113537 + if (!is_size_overflow_asm(stmt))
113538 + walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
113539 +}
113540 +
113541 +/* collect the parm_decls and fndecls (for checking a missing size_overflow attribute (ret or arg) or intentional_overflow)
113542 + * and component refs (for checking the intentional_overflow attribute).
113543 + */
113544 +static void walk_use_def(struct pointer_set_t *visited, struct interesting_node *cur_node, tree lhs)
113545 +{
113546 + const_gimple def_stmt;
113547 +
113548 + if (TREE_CODE(lhs) != SSA_NAME) {
113549 + insert_last_node(cur_node, lhs);
113550 + return;
113551 + }
113552 +
113553 + def_stmt = get_def_stmt(lhs);
113554 + if (!def_stmt)
113555 + return;
113556 +
113557 + if (pointer_set_insert(visited, def_stmt))
113558 + return;
113559 +
113560 + switch (gimple_code(def_stmt)) {
113561 + case GIMPLE_NOP:
113562 + return walk_use_def(visited, cur_node, SSA_NAME_VAR(lhs));
113563 + case GIMPLE_ASM:
113564 + return handle_asm_stmt(visited, cur_node, lhs, def_stmt);
113565 + case GIMPLE_CALL: {
113566 + tree fndecl = gimple_call_fndecl(def_stmt);
113567 +
113568 + if (fndecl == NULL_TREE)
113569 + return;
113570 + insert_last_node(cur_node, fndecl);
113571 + return;
113572 + }
113573 + case GIMPLE_PHI:
113574 + return walk_use_def_phi(visited, cur_node, lhs);
113575 + case GIMPLE_ASSIGN:
113576 + switch (gimple_num_ops(def_stmt)) {
113577 + case 2:
113578 + return walk_use_def(visited, cur_node, gimple_assign_rhs1(def_stmt));
113579 + case 3:
113580 + return walk_use_def_binary(visited, cur_node, lhs);
113581 + }
113582 + default:
113583 + debug_gimple_stmt((gimple)def_stmt);
113584 + error("%s: unknown gimple code", __func__);
113585 + gcc_unreachable();
113586 + }
113587 +}
113588 +
113589 +// Collect all the last nodes for checking the intentional_overflow and size_overflow attributes
113590 +static void set_last_nodes(struct interesting_node *cur_node)
113591 +{
113592 + struct pointer_set_t *visited;
113593 +
113594 + visited = pointer_set_create();
113595 + walk_use_def(visited, cur_node, cur_node->node);
113596 + pointer_set_destroy(visited);
113597 +}
113598 +
113599 +enum precond {
113600 + NO_ATTRIBUTE_SEARCH, NO_CHECK_INSERT, NONE
113601 +};
113602 +
113603 +/* If there is a mark_turn_off intentional attribute on the caller or the callee then there is no duplication and missing size_overflow attribute check anywhere.
113604 + * There is only missing size_overflow attribute checking if the intentional_overflow attribute is the mark_no type.
113605 + * Stmt duplication is unnecessary if there are no binary/ternary assignements or if the unary assignment isn't a cast.
113606 + * It skips the possible error codes too. If the def_stmts trace back to a constant and there are no binary/ternary assigments then we assume that it is some kind of error code.
113607 + */
113608 +static enum precond check_preconditions(struct interesting_node *cur_node)
113609 +{
113610 + bool interesting_conditions[3] = {false, false, false};
113611 +
113612 + set_last_nodes(cur_node);
113613 +
113614 + check_intentional_attribute_ipa(cur_node);
113615 + if (cur_node->intentional_attr_decl == MARK_TURN_OFF || cur_node->intentional_attr_cur_fndecl == MARK_TURN_OFF)
113616 + return NO_ATTRIBUTE_SEARCH;
113617 +
113618 + search_interesting_conditions(cur_node, interesting_conditions);
113619 +
113620 + // error code
113621 + if (interesting_conditions[CAST] && interesting_conditions[FROM_CONST] && !interesting_conditions[NOT_UNARY])
113622 + return NO_ATTRIBUTE_SEARCH;
113623 +
113624 + // unnecessary overflow check
113625 + if (!interesting_conditions[CAST] && !interesting_conditions[NOT_UNARY])
113626 + return NO_CHECK_INSERT;
113627 +
113628 + if (cur_node->intentional_attr_cur_fndecl != MARK_NO)
113629 + return NO_CHECK_INSERT;
113630 +
113631 + return NONE;
113632 +}
113633 +
113634 +/* This function calls the main recursion function (expand) that duplicates the stmts. Before that it checks the intentional_overflow attribute and asm stmts,
113635 + * it decides whether the duplication is necessary or not and it searches for missing size_overflow attributes. After expand() it changes the orig node to the duplicated node
113636 + * in the original stmt (first stmt) and it inserts the overflow check for the arg of the callee or for the return value.
113637 + */
113638 +static struct next_cgraph_node *handle_interesting_stmt(struct next_cgraph_node *cnodes, struct interesting_node *cur_node, struct cgraph_node *caller_node)
113639 +{
113640 + enum precond ret;
113641 + struct pointer_set_t *visited;
113642 + tree new_node, orig_node = cur_node->node;
113643 +
113644 + ret = check_preconditions(cur_node);
113645 + if (ret == NO_ATTRIBUTE_SEARCH)
113646 + return cnodes;
113647 +
113648 + cnodes = search_overflow_attribute(cnodes, cur_node);
113649 +
113650 + if (ret == NO_CHECK_INSERT)
113651 + return cnodes;
113652 +
113653 + visited = pointer_set_create();
113654 + new_node = expand(visited, caller_node, orig_node);
113655 + pointer_set_destroy(visited);
113656 +
113657 + if (new_node == NULL_TREE)
113658 + return cnodes;
113659 +
113660 + change_orig_node(cur_node, new_node);
113661 + check_size_overflow(caller_node, cur_node->first_stmt, TREE_TYPE(new_node), new_node, orig_node, BEFORE_STMT);
113662 +
113663 + return cnodes;
113664 +}
113665 +
113666 +// Check visited interesting nodes.
113667 +static bool is_in_interesting_node(struct interesting_node *head, const_gimple first_stmt, const_tree node, unsigned int num)
113668 +{
113669 + struct interesting_node *cur;
113670 +
113671 + for (cur = head; cur; cur = cur->next) {
113672 + if (!operand_equal_p(node, cur->node, 0))
113673 + continue;
113674 + if (num != cur->num)
113675 + continue;
113676 + if (first_stmt == cur->first_stmt)
113677 + return true;
113678 + }
113679 + return false;
113680 +}
113681 +
113682 +/* Create an interesting node. The ipa pass starts to duplicate from these stmts.
113683 + first_stmt: it is the call or assignment or ret stmt, change_orig_node() will change the original node (retval, or function arg) in this
113684 + last_nodes: they are the last stmts in the recursion (they haven't a def_stmt). They are useful in the missing size_overflow attribute check and
113685 + the intentional_overflow attribute check. They are collected by set_last_nodes().
113686 + num: arg count of a call stmt or 0 when it is a ret
113687 + node: the recursion starts from here, it is a call arg or a return value
113688 + fndecl: the fndecl of the interesting node when the node is an arg. it is the fndecl of the callee function otherwise it is the fndecl of the caller (current_function_fndecl) function.
113689 + intentional_attr_decl: intentional_overflow attribute of the callee function
113690 + intentional_attr_cur_fndecl: intentional_overflow attribute of the caller function
113691 + intentional_mark_from_gimple: the intentional overflow type of size_overflow asm stmt from gimple if it exists
113692 + */
113693 +static struct interesting_node *create_new_interesting_node(struct interesting_node *head, gimple first_stmt, tree node, unsigned int num, gimple asm_stmt)
113694 +{
113695 + struct interesting_node *new_node;
113696 + tree fndecl;
113697 + enum gimple_code code;
113698 +
113699 + gcc_assert(node != NULL_TREE);
113700 + code = gimple_code(first_stmt);
113701 + gcc_assert(code == GIMPLE_CALL || code == GIMPLE_ASM || code == GIMPLE_ASSIGN || code == GIMPLE_RETURN);
113702 +
113703 + if (num == CANNOT_FIND_ARG)
113704 + return head;
113705 +
113706 + if (skip_types(node))
113707 + return head;
113708 +
113709 + if (skip_asm(node))
113710 + return head;
113711 +
113712 + if (is_gimple_call(first_stmt))
113713 + fndecl = gimple_call_fndecl(first_stmt);
113714 + else
113715 + fndecl = current_function_decl;
113716 +
113717 + if (fndecl == NULL_TREE)
113718 + return head;
113719 +
113720 + if (is_in_interesting_node(head, first_stmt, node, num))
113721 + return head;
113722 +
113723 + new_node = (struct interesting_node *)xmalloc(sizeof(*new_node));
113724 +
113725 + new_node->next = NULL;
113726 + new_node->first_stmt = first_stmt;
113727 +#if BUILDING_GCC_VERSION <= 4007
113728 + new_node->last_nodes = VEC_alloc(tree, gc, VEC_LEN);
113729 +#else
113730 + vec_alloc(new_node->last_nodes, VEC_LEN);
113731 +#endif
113732 + new_node->num = num;
113733 + new_node->node = node;
113734 + new_node->fndecl = fndecl;
113735 + new_node->intentional_attr_decl = MARK_NO;
113736 + new_node->intentional_attr_cur_fndecl = MARK_NO;
113737 + new_node->intentional_mark_from_gimple = asm_stmt;
113738 +
113739 + if (!head)
113740 + return new_node;
113741 +
113742 + new_node->next = head;
113743 + return new_node;
113744 +}
113745 +
113746 +/* Check the ret stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
113747 + * If the ret stmt is in the next cgraph node list then it's an interesting ret.
113748 + */
113749 +static struct interesting_node *handle_stmt_by_cgraph_nodes_ret(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
113750 +{
113751 + struct next_cgraph_node *cur_node;
113752 + tree ret = gimple_return_retval(stmt);
113753 +
113754 + if (ret == NULL_TREE)
113755 + return head;
113756 +
113757 + for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
113758 + if (!operand_equal_p(cur_node->callee_fndecl, DECL_ORIGIN(current_function_decl), 0))
113759 + continue;
113760 + if (cur_node->num == 0)
113761 + head = create_new_interesting_node(head, stmt, ret, 0, NOT_INTENTIONAL_ASM);
113762 + }
113763 +
113764 + return head;
113765 +}
113766 +
113767 +/* Check the call stmts in the functions on the next cgraph node list (these functions will be in the hash table and they are reachable from ipa).
113768 + * If the call stmt is in the next cgraph node list then it's an interesting call.
113769 + */
113770 +static struct interesting_node *handle_stmt_by_cgraph_nodes_call(struct interesting_node *head, gimple stmt, struct next_cgraph_node *next_node)
113771 +{
113772 + unsigned int argnum;
113773 + tree fndecl, arg;
113774 + struct next_cgraph_node *cur_node;
113775 +
113776 + fndecl = gimple_call_fndecl(stmt);
113777 + if (fndecl == NULL_TREE)
113778 + return head;
113779 +
113780 + for (cur_node = next_node; cur_node; cur_node = cur_node->next) {
113781 + if (!operand_equal_p(cur_node->callee_fndecl, fndecl, 0))
113782 + continue;
113783 + argnum = get_correct_arg_count(cur_node->num, fndecl);
113784 + gcc_assert(argnum != CANNOT_FIND_ARG);
113785 + if (argnum == 0)
113786 + continue;
113787 +
113788 + arg = gimple_call_arg(stmt, argnum - 1);
113789 + head = create_new_interesting_node(head, stmt, arg, argnum, NOT_INTENTIONAL_ASM);
113790 + }
113791 +
113792 + return head;
113793 +}
113794 +
113795 +static unsigned int check_ops(const_tree orig_node, const_tree node, unsigned int ret_count)
113796 +{
113797 + if (!operand_equal_p(orig_node, node, 0))
113798 + return WRONG_NODE;
113799 + if (skip_types(node))
113800 + return WRONG_NODE;
113801 + return ret_count;
113802 +}
113803 +
113804 +// Get the index of the rhs node in an assignment
113805 +static unsigned int get_assign_ops_count(const_gimple stmt, tree node)
113806 +{
113807 + const_tree rhs1, rhs2;
113808 + unsigned int ret;
113809 +
113810 + gcc_assert(stmt);
113811 + gcc_assert(is_gimple_assign(stmt));
113812 +
113813 + rhs1 = gimple_assign_rhs1(stmt);
113814 + gcc_assert(rhs1 != NULL_TREE);
113815 +
113816 + switch (gimple_num_ops(stmt)) {
113817 + case 2:
113818 + return check_ops(node, rhs1, 1);
113819 + case 3:
113820 + ret = check_ops(node, rhs1, 1);
113821 + if (ret != WRONG_NODE)
113822 + return ret;
113823 +
113824 + rhs2 = gimple_assign_rhs2(stmt);
113825 + gcc_assert(rhs2 != NULL_TREE);
113826 + return check_ops(node, rhs2, 2);
113827 + default:
113828 + gcc_unreachable();
113829 + }
113830 +}
113831 +
113832 +// Find the correct arg number of a call stmt. It is needed when the interesting function is a cloned function.
113833 +static unsigned int find_arg_number_gimple(const_tree arg, const_gimple stmt)
113834 +{
113835 + unsigned int i;
113836 +
113837 + if (gimple_call_fndecl(stmt) == NULL_TREE)
113838 + return CANNOT_FIND_ARG;
113839 +
113840 + for (i = 0; i < gimple_call_num_args(stmt); i++) {
113841 + tree node;
113842 +
113843 + node = gimple_call_arg(stmt, i);
113844 + if (!operand_equal_p(arg, node, 0))
113845 + continue;
113846 + if (!skip_types(node))
113847 + return i + 1;
113848 + }
113849 +
113850 + return CANNOT_FIND_ARG;
113851 +}
113852 +
113853 +/* starting from the size_overflow asm stmt collect interesting stmts. They can be
113854 + * any of return, call or assignment stmts (because of inlining).
113855 + */
113856 +static struct interesting_node *get_interesting_ret_or_call(struct pointer_set_t *visited, struct interesting_node *head, tree node, gimple intentional_asm)
113857 +{
113858 + use_operand_p use_p;
113859 + imm_use_iterator imm_iter;
113860 + unsigned int argnum;
113861 +
113862 + gcc_assert(TREE_CODE(node) == SSA_NAME);
113863 +
113864 + if (pointer_set_insert(visited, node))
113865 + return head;
113866 +
113867 + FOR_EACH_IMM_USE_FAST(use_p, imm_iter, node) {
113868 + gimple stmt = USE_STMT(use_p);
113869 +
113870 + if (stmt == NULL)
113871 + return head;
113872 + if (is_gimple_debug(stmt))
113873 + continue;
113874 +
113875 + switch (gimple_code(stmt)) {
113876 + case GIMPLE_CALL:
113877 + argnum = find_arg_number_gimple(node, stmt);
113878 + head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
113879 + break;
113880 + case GIMPLE_RETURN:
113881 + head = create_new_interesting_node(head, stmt, node, 0, intentional_asm);
113882 + break;
113883 + case GIMPLE_ASSIGN:
113884 + argnum = get_assign_ops_count(stmt, node);
113885 + head = create_new_interesting_node(head, stmt, node, argnum, intentional_asm);
113886 + break;
113887 + case GIMPLE_PHI: {
113888 + tree result = gimple_phi_result(stmt);
113889 + head = get_interesting_ret_or_call(visited, head, result, intentional_asm);
113890 + break;
113891 + }
113892 + case GIMPLE_ASM:
113893 + if (gimple_asm_noutputs(stmt) != 0)
113894 + break;
113895 + if (!is_size_overflow_asm(stmt))
113896 + break;
113897 + head = create_new_interesting_node(head, stmt, node, 1, intentional_asm);
113898 + break;
113899 + case GIMPLE_COND:
113900 + case GIMPLE_SWITCH:
113901 + break;
113902 + default:
113903 + debug_gimple_stmt(stmt);
113904 + gcc_unreachable();
113905 + break;
113906 + }
113907 + }
113908 + return head;
113909 +}
113910 +
113911 +static void remove_size_overflow_asm(gimple stmt)
113912 +{
113913 + gimple_stmt_iterator gsi;
113914 + tree input, output;
113915 +
113916 + if (!is_size_overflow_asm(stmt))
113917 + return;
113918 +
113919 + if (gimple_asm_noutputs(stmt) == 0) {
113920 + gsi = gsi_for_stmt(stmt);
113921 + gsi_remove(&gsi, true);
113922 + return;
113923 + }
113924 +
113925 + input = gimple_asm_input_op(stmt, 0);
113926 + output = gimple_asm_output_op(stmt, 0);
113927 + replace_size_overflow_asm_with_assign(stmt, TREE_VALUE(output), TREE_VALUE(input));
113928 +}
113929 +
113930 +/* handle the size_overflow asm stmts from the gimple pass and collect the interesting stmts.
113931 + * If the asm stmt is a parm_decl kind (noutputs == 0) then remove it.
113932 + * If it is a simple asm stmt then replace it with an assignment from the asm input to the asm output.
113933 + */
113934 +static struct interesting_node *handle_stmt_by_size_overflow_asm(gimple stmt, struct interesting_node *head)
113935 +{
113936 + const_tree output;
113937 + struct pointer_set_t *visited;
113938 + gimple intentional_asm = NOT_INTENTIONAL_ASM;
113939 +
113940 + if (!is_size_overflow_asm(stmt))
113941 + return head;
113942 +
113943 + if (is_size_overflow_intentional_asm_yes(stmt) || is_size_overflow_intentional_asm_turn_off(stmt))
113944 + intentional_asm = stmt;
113945 +
113946 + gcc_assert(gimple_asm_ninputs(stmt) == 1);
113947 +
113948 + if (gimple_asm_noutputs(stmt) == 0 && is_size_overflow_intentional_asm_turn_off(stmt))
113949 + return head;
113950 +
113951 + if (gimple_asm_noutputs(stmt) == 0) {
113952 + const_tree input;
113953 +
113954 + if (!is_size_overflow_intentional_asm_turn_off(stmt))
113955 + return head;
113956 +
113957 + input = gimple_asm_input_op(stmt, 0);
113958 + remove_size_overflow_asm(stmt);
113959 + if (is_gimple_constant(TREE_VALUE(input)))
113960 + return head;
113961 + visited = pointer_set_create();
113962 + head = get_interesting_ret_or_call(visited, head, TREE_VALUE(input), intentional_asm);
113963 + pointer_set_destroy(visited);
113964 + return head;
113965 + }
113966 +
113967 + if (!is_size_overflow_intentional_asm_yes(stmt) && !is_size_overflow_intentional_asm_turn_off(stmt))
113968 + remove_size_overflow_asm(stmt);
113969 +
113970 + visited = pointer_set_create();
113971 + output = gimple_asm_output_op(stmt, 0);
113972 + head = get_interesting_ret_or_call(visited, head, TREE_VALUE(output), intentional_asm);
113973 + pointer_set_destroy(visited);
113974 + return head;
113975 +}
113976 +
113977 +/* Iterate over all the stmts of a function and look for the size_overflow asm stmts (they were created in the gimple pass)
113978 + * or a call stmt or a return stmt and store them in the interesting_node list
113979 + */
113980 +static struct interesting_node *collect_interesting_stmts(struct next_cgraph_node *next_node)
113981 +{
113982 + basic_block bb;
113983 + struct interesting_node *head = NULL;
113984 +
113985 + FOR_ALL_BB(bb) {
113986 + gimple_stmt_iterator gsi;
113987 +
113988 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
113989 + enum gimple_code code;
113990 + gimple stmt = gsi_stmt(gsi);
113991 +
113992 + code = gimple_code(stmt);
113993 +
113994 + if (code == GIMPLE_ASM)
113995 + head = handle_stmt_by_size_overflow_asm(stmt, head);
113996 +
113997 + if (!next_node)
113998 + continue;
113999 + if (code == GIMPLE_CALL)
114000 + head = handle_stmt_by_cgraph_nodes_call(head, stmt, next_node);
114001 + if (code == GIMPLE_RETURN)
114002 + head = handle_stmt_by_cgraph_nodes_ret(head, stmt, next_node);
114003 + }
114004 + }
114005 + return head;
114006 +}
114007 +
114008 +static void set_current_function_decl(tree fndecl)
114009 +{
114010 + gcc_assert(fndecl != NULL_TREE);
114011 +
114012 + push_cfun(DECL_STRUCT_FUNCTION(fndecl));
114013 + calculate_dominance_info(CDI_DOMINATORS);
114014 + current_function_decl = fndecl;
114015 +}
114016 +
114017 +static void unset_current_function_decl(void)
114018 +{
114019 + free_dominance_info(CDI_DOMINATORS);
114020 + pop_cfun();
114021 + current_function_decl = NULL_TREE;
114022 +}
114023 +
114024 +static void free_interesting_node(struct interesting_node *head)
114025 +{
114026 + struct interesting_node *cur;
114027 +
114028 + while (head) {
114029 + cur = head->next;
114030 +#if BUILDING_GCC_VERSION <= 4007
114031 + VEC_free(tree, gc, head->last_nodes);
114032 +#else
114033 + vec_free(head->last_nodes);
114034 +#endif
114035 + free(head);
114036 + head = cur;
114037 + }
114038 +}
114039 +
114040 +static struct visited *insert_visited_function(struct visited *head, struct interesting_node *cur_node)
114041 +{
114042 + struct visited *new_visited;
114043 +
114044 + new_visited = (struct visited *)xmalloc(sizeof(*new_visited));
114045 + new_visited->fndecl = cur_node->fndecl;
114046 + new_visited->num = cur_node->num;
114047 + new_visited->first_stmt = cur_node->first_stmt;
114048 + new_visited->next = NULL;
114049 +
114050 + if (!head)
114051 + return new_visited;
114052 +
114053 + new_visited->next = head;
114054 + return new_visited;
114055 +}
114056 +
114057 +/* Check whether the function was already visited. If the fndecl, the arg count of the fndecl and the first_stmt (call or return) are same then
114058 + * it is a visited function.
114059 + */
114060 +static bool is_visited_function(struct visited *head, struct interesting_node *cur_node)
114061 +{
114062 + struct visited *cur;
114063 +
114064 + if (!head)
114065 + return false;
114066 +
114067 + for (cur = head; cur; cur = cur->next) {
114068 + if (!operand_equal_p(cur_node->fndecl, cur->fndecl, 0))
114069 + continue;
114070 + if (cur_node->num != cur->num)
114071 + continue;
114072 + if (cur_node->first_stmt == cur->first_stmt)
114073 + return true;
114074 + }
114075 + return false;
114076 +}
114077 +
114078 +static void free_next_cgraph_node(struct next_cgraph_node *head)
114079 +{
114080 + struct next_cgraph_node *cur;
114081 +
114082 + while (head) {
114083 + cur = head->next;
114084 + free(head);
114085 + head = cur;
114086 + }
114087 +}
114088 +
114089 +static void remove_all_size_overflow_asm(void)
114090 +{
114091 + basic_block bb;
114092 +
114093 + FOR_ALL_BB(bb) {
114094 + gimple_stmt_iterator si;
114095 +
114096 + for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
114097 + remove_size_overflow_asm(gsi_stmt(si));
114098 + }
114099 +}
114100 +
114101 +/* Main recursive walk of the ipa pass: iterate over the collected interesting stmts in a function
114102 + * (they are interesting if they have an associated size_overflow asm stmt) and recursively walk
114103 + * the newly collected interesting functions (they are interesting if there is control flow between
114104 + * the interesting stmts and them).
114105 + */
114106 +static struct visited *handle_function(struct cgraph_node *node, struct next_cgraph_node *next_node, struct visited *visited)
114107 +{
114108 + struct interesting_node *head, *cur_node;
114109 + struct next_cgraph_node *cur_cnodes, *cnodes_head = NULL;
114110 +
114111 +#if BUILDING_GCC_VERSION <= 4007
114112 + set_current_function_decl(node->decl);
114113 +#else
114114 + set_current_function_decl(node->symbol.decl);
114115 +#endif
114116 + call_count = 0;
114117 +
114118 + head = collect_interesting_stmts(next_node);
114119 + for (cur_node = head; cur_node; cur_node = cur_node->next) {
114120 + if (is_visited_function(visited, cur_node))
114121 + continue;
114122 + cnodes_head = handle_interesting_stmt(cnodes_head, cur_node, node);
114123 + visited = insert_visited_function(visited, cur_node);
114124 + }
114125 +
114126 + free_interesting_node(head);
114127 + remove_all_size_overflow_asm();
114128 + unset_current_function_decl();
114129 +
114130 + for (cur_cnodes = cnodes_head; cur_cnodes; cur_cnodes = cur_cnodes->next)
114131 + visited = handle_function(cur_cnodes->current_function, cur_cnodes, visited);
114132 +
114133 + free_next_cgraph_node(cnodes_head);
114134 + return visited;
114135 +}
114136 +
114137 +static void free_visited(struct visited *head)
114138 +{
114139 + struct visited *cur;
114140 +
114141 + while (head) {
114142 + cur = head->next;
114143 + free(head);
114144 + head = cur;
114145 + }
114146 +}
114147 +
114148 +// erase the local flag
114149 +static void set_plf_false(void)
114150 +{
114151 + basic_block bb;
114152 +
114153 + FOR_ALL_BB(bb) {
114154 + gimple_stmt_iterator si;
114155 +
114156 + for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
114157 + gimple_set_plf(gsi_stmt(si), MY_STMT, false);
114158 + for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
114159 + gimple_set_plf(gsi_stmt(si), MY_STMT, false);
114160 + }
114161 +}
114162 +
114163 +#if BUILDING_GCC_VERSION <= 4006
114164 +static bool cgraph_function_with_gimple_body_p(struct cgraph_node *node)
114165 +{
114166 + return node->analyzed && !node->thunk.thunk_p && !node->alias;
114167 +}
114168 +
114169 +static struct cgraph_node *cgraph_first_function_with_gimple_body(void)
114170 +{
114171 + struct cgraph_node *node;
114172 +
114173 + for (node = cgraph_nodes; node; node = node->next) {
114174 + if (cgraph_function_with_gimple_body_p(node))
114175 + return node;
114176 + }
114177 + return NULL;
114178 +}
114179 +
114180 +static inline struct cgraph_node *cgraph_next_function_with_gimple_body(struct cgraph_node *node)
114181 +{
114182 + for (node = node->next; node; node = node->next) {
114183 + if (cgraph_function_with_gimple_body_p(node))
114184 + return node;
114185 + }
114186 + return NULL;
114187 +}
114188 +
114189 +#define FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) \
114190 + for ((node) = cgraph_first_function_with_gimple_body (); (node); \
114191 + (node) = cgraph_next_function_with_gimple_body (node))
114192 +
114193 +#endif
114194 +
114195 +// Main entry point of the ipa pass: erases the plf flag of all stmts and iterates over all the functions
114196 +static unsigned int search_function(void)
114197 +{
114198 + struct cgraph_node *node;
114199 + struct visited *visited = NULL;
114200 +
114201 + FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
114202 +#if BUILDING_GCC_VERSION <= 4007
114203 + set_current_function_decl(node->decl);
114204 +#else
114205 + set_current_function_decl(node->symbol.decl);
114206 +#endif
114207 + set_plf_false();
114208 + unset_current_function_decl();
114209 + }
114210 +
114211 + FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
114212 + gcc_assert(cgraph_function_flags_ready);
114213 +#if BUILDING_GCC_VERSION <= 4007
114214 + gcc_assert(node->reachable);
114215 +#endif
114216 +
114217 + visited = handle_function(node, NULL, visited);
114218 + }
114219 +
114220 + free_visited(visited);
114221 + return 0;
114222 +}
114223 +
114224 +static struct ipa_opt_pass_d pass_ipa = {
114225 + .pass = {
114226 + .type = SIMPLE_IPA_PASS,
114227 + .name = "size_overflow",
114228 +#if BUILDING_GCC_VERSION >= 4008
114229 + .optinfo_flags = OPTGROUP_NONE,
114230 +#endif
114231 + .gate = NULL,
114232 + .execute = search_function,
114233 + .sub = NULL,
114234 + .next = NULL,
114235 + .static_pass_number = 0,
114236 + .tv_id = TV_NONE,
114237 + .properties_required = 0,
114238 + .properties_provided = 0,
114239 + .properties_destroyed = 0,
114240 + .todo_flags_start = 0,
114241 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_ggc_collect | TODO_verify_flow | TODO_dump_cgraph | TODO_dump_func | TODO_update_ssa_no_phi,
114242 + },
114243 + .generate_summary = NULL,
114244 + .write_summary = NULL,
114245 + .read_summary = NULL,
114246 +#if BUILDING_GCC_VERSION >= 4006
114247 + .write_optimization_summary = NULL,
114248 + .read_optimization_summary = NULL,
114249 +#endif
114250 + .stmt_fixup = NULL,
114251 + .function_transform_todo_flags_start = 0,
114252 + .function_transform = NULL,
114253 + .variable_transform = NULL,
114254 +};
114255 +
114256 +// data for the size_overflow asm stmt
114257 +struct asm_data {
114258 + gimple def_stmt;
114259 + tree input;
114260 + tree output;
114261 +};
114262 +
114263 +#if BUILDING_GCC_VERSION <= 4007
114264 +static VEC(tree, gc) *create_asm_io_list(tree string, tree io)
114265 +#else
114266 +static vec<tree, va_gc> *create_asm_io_list(tree string, tree io)
114267 +#endif
114268 +{
114269 + tree list;
114270 +#if BUILDING_GCC_VERSION <= 4007
114271 + VEC(tree, gc) *vec_list = NULL;
114272 +#else
114273 + vec<tree, va_gc> *vec_list = NULL;
114274 +#endif
114275 +
114276 + list = build_tree_list(NULL_TREE, string);
114277 + list = chainon(NULL_TREE, build_tree_list(list, io));
114278 +#if BUILDING_GCC_VERSION <= 4007
114279 + VEC_safe_push(tree, gc, vec_list, list);
114280 +#else
114281 + vec_safe_push(vec_list, list);
114282 +#endif
114283 + return vec_list;
114284 +}
114285 +
114286 +static void create_asm_stmt(const char *str, tree str_input, tree str_output, struct asm_data *asm_data)
114287 +{
114288 + gimple asm_stmt;
114289 + gimple_stmt_iterator gsi;
114290 +#if BUILDING_GCC_VERSION <= 4007
114291 + VEC(tree, gc) *input, *output = NULL;
114292 +#else
114293 + vec<tree, va_gc> *input, *output = NULL;
114294 +#endif
114295 +
114296 + input = create_asm_io_list(str_input, asm_data->input);
114297 +
114298 + if (asm_data->output)
114299 + output = create_asm_io_list(str_output, asm_data->output);
114300 +
114301 + asm_stmt = gimple_build_asm_vec(str, input, output, NULL, NULL);
114302 + gsi = gsi_for_stmt(asm_data->def_stmt);
114303 + gsi_insert_after(&gsi, asm_stmt, GSI_NEW_STMT);
114304 +
114305 + if (asm_data->output)
114306 + SSA_NAME_DEF_STMT(asm_data->output) = asm_stmt;
114307 +}
114308 +
114309 +static void replace_call_lhs(const struct asm_data *asm_data)
114310 +{
114311 + gimple_set_lhs(asm_data->def_stmt, asm_data->input);
114312 + update_stmt(asm_data->def_stmt);
114313 + SSA_NAME_DEF_STMT(asm_data->input) = asm_data->def_stmt;
114314 +}
114315 +
114316 +static enum mark search_intentional_phi(struct pointer_set_t *visited, const_tree result)
114317 +{
114318 + enum mark cur_fndecl_attr;
114319 + gimple phi = get_def_stmt(result);
114320 + unsigned int i, n = gimple_phi_num_args(phi);
114321 +
114322 + pointer_set_insert(visited, phi);
114323 + for (i = 0; i < n; i++) {
114324 + tree arg = gimple_phi_arg_def(phi, i);
114325 +
114326 + cur_fndecl_attr = search_intentional(visited, arg);
114327 + if (cur_fndecl_attr != MARK_NO)
114328 + return cur_fndecl_attr;
114329 + }
114330 + return MARK_NO;
114331 +}
114332 +
114333 +static enum mark search_intentional_binary(struct pointer_set_t *visited, const_tree lhs)
114334 +{
114335 + enum mark cur_fndecl_attr;
114336 + const_tree rhs1, rhs2;
114337 + gimple def_stmt = get_def_stmt(lhs);
114338 +
114339 + rhs1 = gimple_assign_rhs1(def_stmt);
114340 + rhs2 = gimple_assign_rhs2(def_stmt);
114341 +
114342 + cur_fndecl_attr = search_intentional(visited, rhs1);
114343 + if (cur_fndecl_attr != MARK_NO)
114344 + return cur_fndecl_attr;
114345 + return search_intentional(visited, rhs2);
114346 +}
114347 +
114348 +// Look up the intentional_overflow attribute on the caller and the callee functions.
114349 +static enum mark search_intentional(struct pointer_set_t *visited, const_tree lhs)
114350 +{
114351 + const_gimple def_stmt;
114352 +
114353 + if (TREE_CODE(lhs) != SSA_NAME)
114354 + return get_intentional_attr_type(lhs);
114355 +
114356 + def_stmt = get_def_stmt(lhs);
114357 + if (!def_stmt)
114358 + return MARK_NO;
114359 +
114360 + if (pointer_set_contains(visited, def_stmt))
114361 + return MARK_NO;
114362 +
114363 + switch (gimple_code(def_stmt)) {
114364 + case GIMPLE_NOP:
114365 + return search_intentional(visited, SSA_NAME_VAR(lhs));
114366 + case GIMPLE_ASM:
114367 + if (is_size_overflow_intentional_asm_turn_off(def_stmt))
114368 + return MARK_TURN_OFF;
114369 + return MARK_NO;
114370 + case GIMPLE_CALL:
114371 + return MARK_NO;
114372 + case GIMPLE_PHI:
114373 + return search_intentional_phi(visited, lhs);
114374 + case GIMPLE_ASSIGN:
114375 + switch (gimple_num_ops(def_stmt)) {
114376 + case 2:
114377 + return search_intentional(visited, gimple_assign_rhs1(def_stmt));
114378 + case 3:
114379 + return search_intentional_binary(visited, lhs);
114380 + }
114381 + case GIMPLE_RETURN:
114382 + return MARK_NO;
114383 + default:
114384 + debug_gimple_stmt((gimple)def_stmt);
114385 + error("%s: unknown gimple code", __func__);
114386 + gcc_unreachable();
114387 + }
114388 +}
114389 +
114390 +// Check the intentional_overflow attribute and create the asm comment string for the size_overflow asm stmt.
114391 +static enum mark check_intentional_attribute_gimple(const_tree arg, const_gimple stmt, unsigned int argnum)
114392 +{
114393 + const_tree fndecl;
114394 + struct pointer_set_t *visited;
114395 + enum mark cur_fndecl_attr, decl_attr = MARK_NO;
114396 +
114397 + fndecl = get_interesting_orig_fndecl(stmt, argnum);
114398 + if (is_end_intentional_intentional_attr(fndecl, argnum))
114399 + decl_attr = MARK_NOT_INTENTIONAL;
114400 + else if (is_yes_intentional_attr(fndecl, argnum))
114401 + decl_attr = MARK_YES;
114402 + else if (is_turn_off_intentional_attr(fndecl) || is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl))) {
114403 + return MARK_TURN_OFF;
114404 + }
114405 +
114406 + visited = pointer_set_create();
114407 + cur_fndecl_attr = search_intentional(visited, arg);
114408 + pointer_set_destroy(visited);
114409 +
114410 + switch (cur_fndecl_attr) {
114411 + case MARK_NO:
114412 + return MARK_NO;
114413 + case MARK_TURN_OFF:
114414 + return MARK_TURN_OFF;
114415 + default:
114416 + print_missing_intentional(decl_attr, cur_fndecl_attr, fndecl, argnum);
114417 + return MARK_YES;
114418 + }
114419 +}
114420 +
114421 +static void check_missing_size_overflow_attribute(tree var)
114422 +{
114423 + tree orig_fndecl;
114424 + unsigned int num;
114425 +
114426 + if (is_a_return_check(var))
114427 + orig_fndecl = DECL_ORIGIN(var);
114428 + else
114429 + orig_fndecl = DECL_ORIGIN(current_function_decl);
114430 +
114431 + num = get_function_num(var, orig_fndecl);
114432 + if (num == CANNOT_FIND_ARG)
114433 + return;
114434 +
114435 + is_missing_function(orig_fndecl, num);
114436 +}
114437 +
114438 +static void search_size_overflow_attribute_phi(struct pointer_set_t *visited, const_tree result)
114439 +{
114440 + gimple phi = get_def_stmt(result);
114441 + unsigned int i, n = gimple_phi_num_args(phi);
114442 +
114443 + pointer_set_insert(visited, phi);
114444 + for (i = 0; i < n; i++) {
114445 + tree arg = gimple_phi_arg_def(phi, i);
114446 +
114447 + search_size_overflow_attribute(visited, arg);
114448 + }
114449 +}
114450 +
114451 +static void search_size_overflow_attribute_binary(struct pointer_set_t *visited, const_tree lhs)
114452 +{
114453 + const_gimple def_stmt = get_def_stmt(lhs);
114454 + tree rhs1, rhs2;
114455 +
114456 + rhs1 = gimple_assign_rhs1(def_stmt);
114457 + rhs2 = gimple_assign_rhs2(def_stmt);
114458 +
114459 + search_size_overflow_attribute(visited, rhs1);
114460 + search_size_overflow_attribute(visited, rhs2);
114461 +}
114462 +
114463 +static void search_size_overflow_attribute(struct pointer_set_t *visited, tree lhs)
114464 +{
114465 + const_gimple def_stmt;
114466 +
114467 + if (TREE_CODE(lhs) == PARM_DECL) {
114468 + check_missing_size_overflow_attribute(lhs);
114469 + return;
114470 + }
114471 +
114472 + def_stmt = get_def_stmt(lhs);
114473 + if (!def_stmt)
114474 + return;
114475 +
114476 + if (pointer_set_insert(visited, def_stmt))
114477 + return;
114478 +
114479 + switch (gimple_code(def_stmt)) {
114480 + case GIMPLE_NOP:
114481 + return search_size_overflow_attribute(visited, SSA_NAME_VAR(lhs));
114482 + case GIMPLE_ASM:
114483 + return;
114484 + case GIMPLE_CALL: {
114485 + tree fndecl = gimple_call_fndecl(def_stmt);
114486 +
114487 + if (fndecl == NULL_TREE)
114488 + return;
114489 + check_missing_size_overflow_attribute(fndecl);
114490 + return;
114491 + }
114492 + case GIMPLE_PHI:
114493 + return search_size_overflow_attribute_phi(visited, lhs);
114494 + case GIMPLE_ASSIGN:
114495 + switch (gimple_num_ops(def_stmt)) {
114496 + case 2:
114497 + return search_size_overflow_attribute(visited, gimple_assign_rhs1(def_stmt));
114498 + case 3:
114499 + return search_size_overflow_attribute_binary(visited, lhs);
114500 + }
114501 + default:
114502 + debug_gimple_stmt((gimple)def_stmt);
114503 + error("%s: unknown gimple code", __func__);
114504 + gcc_unreachable();
114505 + }
114506 +}
114507 +
114508 +// Search missing entries in the hash table (invoked from the gimple pass)
114509 +static void search_missing_size_overflow_attribute_gimple(const_gimple stmt, unsigned int num)
114510 +{
114511 + tree fndecl = NULL_TREE;
114512 + tree lhs;
114513 + struct pointer_set_t *visited;
114514 +
114515 + if (is_turn_off_intentional_attr(DECL_ORIGIN(current_function_decl)))
114516 + return;
114517 +
114518 + if (num == 0) {
114519 + gcc_assert(gimple_code(stmt) == GIMPLE_RETURN);
114520 + lhs = gimple_return_retval(stmt);
114521 + } else {
114522 + gcc_assert(is_gimple_call(stmt));
114523 + lhs = gimple_call_arg(stmt, num - 1);
114524 + fndecl = gimple_call_fndecl(stmt);
114525 + }
114526 +
114527 + if (fndecl != NULL_TREE && is_turn_off_intentional_attr(DECL_ORIGIN(fndecl)))
114528 + return;
114529 +
114530 + visited = pointer_set_create();
114531 + search_size_overflow_attribute(visited, lhs);
114532 + pointer_set_destroy(visited);
114533 +}
114534 +
114535 +static void create_output_from_phi(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
114536 +{
114537 + gimple_stmt_iterator gsi;
114538 + gimple assign;
114539 +
114540 + assign = gimple_build_assign(asm_data->input, asm_data->output);
114541 + gsi = gsi_for_stmt(stmt);
114542 + gsi_insert_before(&gsi, assign, GSI_NEW_STMT);
114543 + asm_data->def_stmt = assign;
114544 +
114545 + asm_data->output = create_new_var(TREE_TYPE(asm_data->output));
114546 + asm_data->output = make_ssa_name(asm_data->output, stmt);
114547 + if (gimple_code(stmt) == GIMPLE_RETURN)
114548 + gimple_return_set_retval(stmt, asm_data->output);
114549 + else
114550 + gimple_call_set_arg(stmt, argnum - 1, asm_data->output);
114551 + update_stmt(stmt);
114552 +}
114553 +
114554 +static const char *convert_mark_to_str(enum mark mark)
114555 +{
114556 + switch (mark) {
114557 + case MARK_NO:
114558 + return OK_ASM_STR;
114559 + case MARK_YES:
114560 + case MARK_NOT_INTENTIONAL:
114561 + return YES_ASM_STR;
114562 + case MARK_TURN_OFF:
114563 + return TURN_OFF_ASM_STR;
114564 + }
114565 +
114566 + gcc_unreachable();
114567 +}
114568 +
114569 +/* Create the input of the size_overflow asm stmt.
114570 + * When the arg of the callee function is a parm_decl it creates this kind of size_overflow asm stmt:
114571 + * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
114572 + * The input field in asm_data will be empty if there is no need for further size_overflow asm stmt insertion.
114573 + * otherwise create the input (for a phi stmt the output too) of the asm stmt.
114574 + */
114575 +static void create_asm_input(gimple stmt, unsigned int argnum, struct asm_data *asm_data)
114576 +{
114577 + if (!asm_data->def_stmt) {
114578 + asm_data->input = NULL_TREE;
114579 + return;
114580 + }
114581 +
114582 + gcc_assert(!is_size_overflow_intentional_asm_turn_off(asm_data->def_stmt));
114583 +
114584 + asm_data->input = create_new_var(TREE_TYPE(asm_data->output));
114585 + asm_data->input = make_ssa_name(asm_data->input, asm_data->def_stmt);
114586 +
114587 + switch (gimple_code(asm_data->def_stmt)) {
114588 + case GIMPLE_ASSIGN:
114589 + case GIMPLE_CALL:
114590 + replace_call_lhs(asm_data);
114591 + break;
114592 + case GIMPLE_PHI:
114593 + create_output_from_phi(stmt, argnum, asm_data);
114594 + break;
114595 + case GIMPLE_NOP: {
114596 + enum mark mark;
114597 + const char *str;
114598 +
114599 + mark = check_intentional_attribute_gimple(asm_data->output, stmt, argnum);
114600 + str = convert_mark_to_str(mark);
114601 +
114602 + asm_data->input = asm_data->output;
114603 + asm_data->output = NULL;
114604 + asm_data->def_stmt = stmt;
114605 +
114606 + create_asm_stmt(str, build_string(2, "rm"), NULL, asm_data);
114607 + asm_data->input = NULL_TREE;
114608 + break;
114609 + }
114610 + case GIMPLE_ASM:
114611 + if (is_size_overflow_asm(asm_data->def_stmt)) {
114612 + asm_data->input = NULL_TREE;
114613 + break;
114614 + }
114615 + default:
114616 + debug_gimple_stmt(asm_data->def_stmt);
114617 + gcc_unreachable();
114618 + }
114619 +}
114620 +
114621 +/* This is the gimple part of searching for a missing size_overflow attribute. If the intentional_overflow attribute type
114622 + * is of the right kind create the appropriate size_overflow asm stmts:
114623 + * __asm__("# size_overflow" : =rm" D.3344_8 : "0" cicus.4_16);
114624 + * __asm__("# size_overflow MARK_YES" : : "rm" size_1(D));
114625 + */
114626 +static void create_size_overflow_asm(gimple stmt, tree output_node, unsigned int argnum)
114627 +{
114628 + struct asm_data asm_data;
114629 + const char *str;
114630 + enum mark mark;
114631 +
114632 + if (is_gimple_constant(output_node))
114633 + return;
114634 +
114635 + asm_data.output = output_node;
114636 + mark = check_intentional_attribute_gimple(asm_data.output, stmt, argnum);
114637 + if (mark == MARK_TURN_OFF)
114638 + return;
114639 +
114640 + search_missing_size_overflow_attribute_gimple(stmt, argnum);
114641 +
114642 + asm_data.def_stmt = get_def_stmt(asm_data.output);
114643 + create_asm_input(stmt, argnum, &asm_data);
114644 + if (asm_data.input == NULL_TREE)
114645 + return;
114646 +
114647 + str = convert_mark_to_str(mark);
114648 + create_asm_stmt(str, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
114649 +}
114650 +
114651 +// Determine the return value and insert the asm stmt to mark the return stmt.
114652 +static void insert_asm_ret(gimple stmt)
114653 +{
114654 + tree ret;
114655 +
114656 + ret = gimple_return_retval(stmt);
114657 + create_size_overflow_asm(stmt, ret, 0);
114658 +}
114659 +
114660 +// Determine the correct arg index and arg and insert the asm stmt to mark the stmt.
114661 +static void insert_asm_arg(gimple stmt, unsigned int orig_argnum)
114662 +{
114663 + tree arg;
114664 + unsigned int argnum;
114665 +
114666 + argnum = get_correct_arg_count(orig_argnum, gimple_call_fndecl(stmt));
114667 + gcc_assert(argnum != 0);
114668 + if (argnum == CANNOT_FIND_ARG)
114669 + return;
114670 +
114671 + arg = gimple_call_arg(stmt, argnum - 1);
114672 + gcc_assert(arg != NULL_TREE);
114673 + create_size_overflow_asm(stmt, arg, argnum);
114674 +}
114675 +
114676 +// If a function arg or the return value is marked by the size_overflow attribute then set its index in the array.
114677 +static void set_argnum_attribute(const_tree attr, bool *argnums)
114678 +{
114679 + unsigned int argnum;
114680 + tree attr_value;
114681 +
114682 + for (attr_value = TREE_VALUE(attr); attr_value; attr_value = TREE_CHAIN(attr_value)) {
114683 + argnum = TREE_INT_CST_LOW(TREE_VALUE(attr_value));
114684 + argnums[argnum] = true;
114685 + }
114686 +}
114687 +
114688 +// If a function arg or the return value is in the hash table then set its index in the array.
114689 +static void set_argnum_hash(tree fndecl, bool *argnums)
114690 +{
114691 + unsigned int num;
114692 + const struct size_overflow_hash *hash;
114693 +
114694 + hash = get_function_hash(DECL_ORIGIN(fndecl));
114695 + if (!hash)
114696 + return;
114697 +
114698 + for (num = 0; num <= MAX_PARAM; num++) {
114699 + if (!(hash->param & (1U << num)))
114700 + continue;
114701 +
114702 + argnums[num] = true;
114703 + }
114704 +}
114705 +
114706 +static bool is_all_the_argnums_empty(bool *argnums)
114707 +{
114708 + unsigned int i;
114709 +
114710 + for (i = 0; i <= MAX_PARAM; i++)
114711 + if (argnums[i])
114712 + return false;
114713 + return true;
114714 +}
114715 +
114716 +// Check whether the arguments or the return value of the function are in the hash table or are marked by the size_overflow attribute.
114717 +static void search_interesting_args(tree fndecl, bool *argnums)
114718 +{
114719 + const_tree attr;
114720 +
114721 + set_argnum_hash(fndecl, argnums);
114722 + if (!is_all_the_argnums_empty(argnums))
114723 + return;
114724 +
114725 + attr = lookup_attribute("size_overflow", DECL_ATTRIBUTES(fndecl));
114726 + if (attr && TREE_VALUE(attr))
114727 + set_argnum_attribute(attr, argnums);
114728 +}
114729 +
114730 +/*
114731 + * Look up the intentional_overflow attribute that turns off ipa based duplication
114732 + * on the callee function, if found insert an asm stmt with "MARK_TURN_OFF".
114733 + */
114734 +static bool create_mark_turn_off_asm(gimple stmt)
114735 +{
114736 + enum mark mark;
114737 + struct asm_data asm_data;
114738 + const_tree fndecl = gimple_call_fndecl(stmt);
114739 +
114740 + mark = get_intentional_attr_type(DECL_ORIGIN(fndecl));
114741 + if (mark != MARK_TURN_OFF)
114742 + return false;
114743 +
114744 + asm_data.def_stmt = stmt;
114745 + asm_data.output = gimple_call_lhs(stmt);
114746 +
114747 + if (asm_data.output == NULL_TREE) {
114748 + asm_data.input = gimple_call_arg(stmt, 0);
114749 + if (is_gimple_constant(asm_data.input))
114750 + return false;
114751 + asm_data.output = NULL;
114752 + create_asm_stmt(TURN_OFF_ASM_STR, build_string(2, "rm"), NULL, &asm_data);
114753 + return true;
114754 + }
114755 +
114756 + create_asm_input(stmt, 0, &asm_data);
114757 + gcc_assert(asm_data.input != NULL_TREE);
114758 +
114759 + create_asm_stmt(TURN_OFF_ASM_STR, build_string(1, "0"), build_string(3, "=rm"), &asm_data);
114760 + return true;
114761 +}
114762 +
114763 +// If the argument(s) of the callee function is/are in the hash table or are marked by an attribute then mark the call stmt with an asm stmt
114764 +static void handle_interesting_function(gimple stmt)
114765 +{
114766 + unsigned int argnum;
114767 + tree fndecl;
114768 + bool orig_argnums[MAX_PARAM + 1] = {false};
114769 +
114770 + if (gimple_call_num_args(stmt) == 0)
114771 + return;
114772 + fndecl = gimple_call_fndecl(stmt);
114773 + if (fndecl == NULL_TREE)
114774 + return;
114775 + fndecl = DECL_ORIGIN(fndecl);
114776 +
114777 + if (create_mark_turn_off_asm(stmt))
114778 + return;
114779 +
114780 + search_interesting_args(fndecl, orig_argnums);
114781 +
114782 + for (argnum = 1; argnum < MAX_PARAM; argnum++)
114783 + if (orig_argnums[argnum])
114784 + insert_asm_arg(stmt, argnum);
114785 +}
114786 +
114787 +// If the return value of the caller function is in hash table (its index is 0) then mark the return stmt with an asm stmt
114788 +static void handle_interesting_ret(gimple stmt)
114789 +{
114790 + bool orig_argnums[MAX_PARAM + 1] = {false};
114791 +
114792 + search_interesting_args(current_function_decl, orig_argnums);
114793 +
114794 + if (orig_argnums[0])
114795 + insert_asm_ret(stmt);
114796 +}
114797 +
114798 +// Iterate over all the stmts and search for call and return stmts and mark them if they're in the hash table
114799 +static unsigned int search_interesting_functions(void)
114800 +{
114801 + basic_block bb;
114802 +
114803 + FOR_ALL_BB(bb) {
114804 + gimple_stmt_iterator gsi;
114805 +
114806 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
114807 + gimple stmt = gsi_stmt(gsi);
114808 +
114809 + if (is_size_overflow_asm(stmt))
114810 + continue;
114811 +
114812 + if (is_gimple_call(stmt))
114813 + handle_interesting_function(stmt);
114814 + else if (gimple_code(stmt) == GIMPLE_RETURN)
114815 + handle_interesting_ret(stmt);
114816 + }
114817 + }
114818 + return 0;
114819 +}
114820 +
114821 +/*
114822 + * A lot of functions get inlined before the ipa passes so after the build_ssa gimple pass
114823 + * this pass inserts asm stmts to mark the interesting args
114824 + * that the ipa pass will detect and insert the size overflow checks for.
114825 + */
114826 +static struct gimple_opt_pass insert_size_overflow_asm_pass = {
114827 + .pass = {
114828 + .type = GIMPLE_PASS,
114829 + .name = "insert_size_overflow_asm",
114830 +#if BUILDING_GCC_VERSION >= 4008
114831 + .optinfo_flags = OPTGROUP_NONE,
114832 +#endif
114833 + .gate = NULL,
114834 + .execute = search_interesting_functions,
114835 + .sub = NULL,
114836 + .next = NULL,
114837 + .static_pass_number = 0,
114838 + .tv_id = TV_NONE,
114839 + .properties_required = PROP_cfg,
114840 + .properties_provided = 0,
114841 + .properties_destroyed = 0,
114842 + .todo_flags_start = 0,
114843 + .todo_flags_finish = TODO_dump_func | TODO_verify_ssa | TODO_verify_stmts | TODO_remove_unused_locals | TODO_update_ssa_no_phi | TODO_cleanup_cfg | TODO_ggc_collect | TODO_verify_flow
114844 + }
114845 +};
114846 +
114847 +// Create the noreturn report_size_overflow() function decl.
114848 +static void start_unit_callback(void __unused *gcc_data, void __unused *user_data)
114849 +{
114850 + tree fntype;
114851 +
114852 + const_char_ptr_type_node = build_pointer_type(build_type_variant(char_type_node, 1, 0));
114853 +
114854 + // void report_size_overflow(const char *loc_file, unsigned int loc_line, const char *current_func, const char *ssa_var)
114855 + fntype = build_function_type_list(void_type_node,
114856 + const_char_ptr_type_node,
114857 + unsigned_type_node,
114858 + const_char_ptr_type_node,
114859 + const_char_ptr_type_node,
114860 + NULL_TREE);
114861 + report_size_overflow_decl = build_fn_decl("report_size_overflow", fntype);
114862 +
114863 + DECL_ASSEMBLER_NAME(report_size_overflow_decl);
114864 + TREE_PUBLIC(report_size_overflow_decl) = 1;
114865 + DECL_EXTERNAL(report_size_overflow_decl) = 1;
114866 + DECL_ARTIFICIAL(report_size_overflow_decl) = 1;
114867 + TREE_THIS_VOLATILE(report_size_overflow_decl) = 1;
114868 +}
114869 +
114870 +static unsigned int dump_functions(void)
114871 +{
114872 + struct cgraph_node *node;
114873 +
114874 + FOR_EACH_FUNCTION_WITH_GIMPLE_BODY(node) {
114875 + basic_block bb;
114876 +
114877 +#if BUILDING_GCC_VERSION <= 4007
114878 + push_cfun(DECL_STRUCT_FUNCTION(node->decl));
114879 + current_function_decl = node->decl;
114880 +#else
114881 + push_cfun(DECL_STRUCT_FUNCTION(node->symbol.decl));
114882 + current_function_decl = node->symbol.decl;
114883 +#endif
114884 +
114885 + fprintf(stderr, "-----------------------------------------\n%s\n-----------------------------------------\n", NAME(current_function_decl));
114886 +
114887 + FOR_ALL_BB(bb) {
114888 + gimple_stmt_iterator si;
114889 +
114890 + fprintf(stderr, "<bb %u>:\n", bb->index);
114891 + for (si = gsi_start_phis(bb); !gsi_end_p(si); gsi_next(&si))
114892 + debug_gimple_stmt(gsi_stmt(si));
114893 + for (si = gsi_start_bb(bb); !gsi_end_p(si); gsi_next(&si))
114894 + debug_gimple_stmt(gsi_stmt(si));
114895 + fprintf(stderr, "\n");
114896 + }
114897 +
114898 + fprintf(stderr, "-------------------------------------------------------------------------\n");
114899 +
114900 + pop_cfun();
114901 + current_function_decl = NULL_TREE;
114902 + }
114903 +
114904 + fprintf(stderr, "###############################################################################\n");
114905 +
114906 + return 0;
114907 +}
114908 +
114909 +static struct ipa_opt_pass_d pass_dump = {
114910 + .pass = {
114911 + .type = SIMPLE_IPA_PASS,
114912 + .name = "dump",
114913 +#if BUILDING_GCC_VERSION >= 4008
114914 + .optinfo_flags = OPTGROUP_NONE,
114915 +#endif
114916 + .gate = NULL,
114917 + .execute = dump_functions,
114918 + .sub = NULL,
114919 + .next = NULL,
114920 + .static_pass_number = 0,
114921 + .tv_id = TV_NONE,
114922 + .properties_required = 0,
114923 + .properties_provided = 0,
114924 + .properties_destroyed = 0,
114925 + .todo_flags_start = 0,
114926 + .todo_flags_finish = 0,
114927 + },
114928 + .generate_summary = NULL,
114929 + .write_summary = NULL,
114930 + .read_summary = NULL,
114931 +#if BUILDING_GCC_VERSION >= 4006
114932 + .write_optimization_summary = NULL,
114933 + .read_optimization_summary = NULL,
114934 +#endif
114935 + .stmt_fixup = NULL,
114936 + .function_transform_todo_flags_start = 0,
114937 + .function_transform = NULL,
114938 + .variable_transform = NULL,
114939 +};
114940 +
114941 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
114942 +{
114943 + int i;
114944 + const char * const plugin_name = plugin_info->base_name;
114945 + const int argc = plugin_info->argc;
114946 + const struct plugin_argument * const argv = plugin_info->argv;
114947 + bool enable = true;
114948 +
114949 + struct register_pass_info insert_size_overflow_asm_pass_info = {
114950 + .pass = &insert_size_overflow_asm_pass.pass,
114951 + .reference_pass_name = "ssa",
114952 + .ref_pass_instance_number = 1,
114953 + .pos_op = PASS_POS_INSERT_AFTER
114954 + };
114955 +
114956 + struct register_pass_info __unused dump_before_pass_info = {
114957 + .pass = &pass_dump.pass,
114958 + .reference_pass_name = "increase_alignment",
114959 + .ref_pass_instance_number = 1,
114960 + .pos_op = PASS_POS_INSERT_BEFORE
114961 + };
114962 +
114963 + struct register_pass_info ipa_pass_info = {
114964 + .pass = &pass_ipa.pass,
114965 + .reference_pass_name = "increase_alignment",
114966 + .ref_pass_instance_number = 1,
114967 + .pos_op = PASS_POS_INSERT_BEFORE
114968 + };
114969 +
114970 + struct register_pass_info __unused dump_after_pass_info = {
114971 + .pass = &pass_dump.pass,
114972 + .reference_pass_name = "increase_alignment",
114973 + .ref_pass_instance_number = 1,
114974 + .pos_op = PASS_POS_INSERT_BEFORE
114975 + };
114976 +
114977 + if (!plugin_default_version_check(version, &gcc_version)) {
114978 + error(G_("incompatible gcc/plugin versions"));
114979 + return 1;
114980 + }
114981 +
114982 + for (i = 0; i < argc; ++i) {
114983 + if (!strcmp(argv[i].key, "no-size-overflow")) {
114984 + enable = false;
114985 + continue;
114986 + }
114987 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
114988 + }
114989 +
114990 + register_callback(plugin_name, PLUGIN_INFO, NULL, &size_overflow_plugin_info);
114991 + if (enable) {
114992 + register_callback("start_unit", PLUGIN_START_UNIT, &start_unit_callback, NULL);
114993 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &insert_size_overflow_asm_pass_info);
114994 +// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_before_pass_info);
114995 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &ipa_pass_info);
114996 +// register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &dump_after_pass_info);
114997 + }
114998 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
114999 +
115000 + return 0;
115001 +}
115002 diff --git a/tools/gcc/stackleak_plugin.c b/tools/gcc/stackleak_plugin.c
115003 new file mode 100644
115004 index 0000000..ac2901e
115005 --- /dev/null
115006 +++ b/tools/gcc/stackleak_plugin.c
115007 @@ -0,0 +1,327 @@
115008 +/*
115009 + * Copyright 2011-2013 by the PaX Team <pageexec@freemail.hu>
115010 + * Licensed under the GPL v2
115011 + *
115012 + * Note: the choice of the license means that the compilation process is
115013 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
115014 + * but for the kernel it doesn't matter since it doesn't link against
115015 + * any of the gcc libraries
115016 + *
115017 + * gcc plugin to help implement various PaX features
115018 + *
115019 + * - track lowest stack pointer
115020 + *
115021 + * TODO:
115022 + * - initialize all local variables
115023 + *
115024 + * BUGS:
115025 + * - none known
115026 + */
115027 +#include "gcc-plugin.h"
115028 +#include "config.h"
115029 +#include "system.h"
115030 +#include "coretypes.h"
115031 +#include "tree.h"
115032 +#include "tree-pass.h"
115033 +#include "flags.h"
115034 +#include "intl.h"
115035 +#include "toplev.h"
115036 +#include "plugin.h"
115037 +//#include "expr.h" where are you...
115038 +#include "diagnostic.h"
115039 +#include "plugin-version.h"
115040 +#include "tm.h"
115041 +#include "function.h"
115042 +#include "basic-block.h"
115043 +#include "gimple.h"
115044 +#include "rtl.h"
115045 +#include "emit-rtl.h"
115046 +
115047 +#if BUILDING_GCC_VERSION >= 4008
115048 +#define TODO_dump_func 0
115049 +#endif
115050 +
115051 +extern void print_gimple_stmt(FILE *, gimple, int, int);
115052 +
115053 +int plugin_is_GPL_compatible;
115054 +
115055 +static int track_frame_size = -1;
115056 +static const char track_function[] = "pax_track_stack";
115057 +static const char check_function[] = "pax_check_alloca";
115058 +static bool init_locals;
115059 +
115060 +static struct plugin_info stackleak_plugin_info = {
115061 + .version = "201302112000",
115062 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
115063 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
115064 +};
115065 +
115066 +static bool gate_stackleak_track_stack(void);
115067 +static unsigned int execute_stackleak_tree_instrument(void);
115068 +static unsigned int execute_stackleak_final(void);
115069 +
115070 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
115071 + .pass = {
115072 + .type = GIMPLE_PASS,
115073 + .name = "stackleak_tree_instrument",
115074 +#if BUILDING_GCC_VERSION >= 4008
115075 + .optinfo_flags = OPTGROUP_NONE,
115076 +#endif
115077 + .gate = gate_stackleak_track_stack,
115078 + .execute = execute_stackleak_tree_instrument,
115079 + .sub = NULL,
115080 + .next = NULL,
115081 + .static_pass_number = 0,
115082 + .tv_id = TV_NONE,
115083 + .properties_required = PROP_gimple_leh | PROP_cfg,
115084 + .properties_provided = 0,
115085 + .properties_destroyed = 0,
115086 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
115087 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_update_ssa
115088 + }
115089 +};
115090 +
115091 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
115092 + .pass = {
115093 + .type = RTL_PASS,
115094 + .name = "stackleak_final",
115095 +#if BUILDING_GCC_VERSION >= 4008
115096 + .optinfo_flags = OPTGROUP_NONE,
115097 +#endif
115098 + .gate = gate_stackleak_track_stack,
115099 + .execute = execute_stackleak_final,
115100 + .sub = NULL,
115101 + .next = NULL,
115102 + .static_pass_number = 0,
115103 + .tv_id = TV_NONE,
115104 + .properties_required = 0,
115105 + .properties_provided = 0,
115106 + .properties_destroyed = 0,
115107 + .todo_flags_start = 0,
115108 + .todo_flags_finish = TODO_dump_func
115109 + }
115110 +};
115111 +
115112 +static bool gate_stackleak_track_stack(void)
115113 +{
115114 + return track_frame_size >= 0;
115115 +}
115116 +
115117 +static void stackleak_check_alloca(gimple_stmt_iterator *gsi)
115118 +{
115119 + gimple check_alloca;
115120 + tree fntype, fndecl, alloca_size;
115121 +
115122 + fntype = build_function_type_list(void_type_node, long_unsigned_type_node, NULL_TREE);
115123 + fndecl = build_fn_decl(check_function, fntype);
115124 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
115125 +
115126 + // insert call to void pax_check_alloca(unsigned long size)
115127 + alloca_size = gimple_call_arg(gsi_stmt(*gsi), 0);
115128 + check_alloca = gimple_build_call(fndecl, 1, alloca_size);
115129 + gsi_insert_before(gsi, check_alloca, GSI_SAME_STMT);
115130 +}
115131 +
115132 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi)
115133 +{
115134 + gimple track_stack;
115135 + tree fntype, fndecl;
115136 +
115137 + fntype = build_function_type_list(void_type_node, NULL_TREE);
115138 + fndecl = build_fn_decl(track_function, fntype);
115139 + DECL_ASSEMBLER_NAME(fndecl); // for LTO
115140 +
115141 + // insert call to void pax_track_stack(void)
115142 + track_stack = gimple_build_call(fndecl, 0);
115143 + gsi_insert_after(gsi, track_stack, GSI_CONTINUE_LINKING);
115144 +}
115145 +
115146 +#if BUILDING_GCC_VERSION == 4005
115147 +static bool gimple_call_builtin_p(gimple stmt, enum built_in_function code)
115148 +{
115149 + tree fndecl;
115150 +
115151 + if (!is_gimple_call(stmt))
115152 + return false;
115153 + fndecl = gimple_call_fndecl(stmt);
115154 + if (!fndecl)
115155 + return false;
115156 + if (DECL_BUILT_IN_CLASS(fndecl) != BUILT_IN_NORMAL)
115157 + return false;
115158 +// print_node(stderr, "pax", fndecl, 4);
115159 + return DECL_FUNCTION_CODE(fndecl) == code;
115160 +}
115161 +#endif
115162 +
115163 +static bool is_alloca(gimple stmt)
115164 +{
115165 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA))
115166 + return true;
115167 +
115168 +#if BUILDING_GCC_VERSION >= 4007
115169 + if (gimple_call_builtin_p(stmt, BUILT_IN_ALLOCA_WITH_ALIGN))
115170 + return true;
115171 +#endif
115172 +
115173 + return false;
115174 +}
115175 +
115176 +static unsigned int execute_stackleak_tree_instrument(void)
115177 +{
115178 + basic_block bb, entry_bb;
115179 + bool prologue_instrumented = false, is_leaf = true;
115180 +
115181 + entry_bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
115182 +
115183 + // 1. loop through BBs and GIMPLE statements
115184 + FOR_EACH_BB(bb) {
115185 + gimple_stmt_iterator gsi;
115186 +
115187 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
115188 + gimple stmt;
115189 +
115190 + stmt = gsi_stmt(gsi);
115191 +
115192 + if (is_gimple_call(stmt))
115193 + is_leaf = false;
115194 +
115195 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
115196 + if (!is_alloca(stmt))
115197 + continue;
115198 +
115199 + // 2. insert stack overflow check before each __builtin_alloca call
115200 + stackleak_check_alloca(&gsi);
115201 +
115202 + // 3. insert track call after each __builtin_alloca call
115203 + stackleak_add_instrumentation(&gsi);
115204 + if (bb == entry_bb)
115205 + prologue_instrumented = true;
115206 + }
115207 + }
115208 +
115209 + // special cases for some bad linux code: taking the address of static inline functions will materialize them
115210 + // but we mustn't instrument some of them as the resulting stack alignment required by the function call ABI
115211 + // will break other assumptions regarding the expected (but not otherwise enforced) register clobbering ABI.
115212 + // case in point: native_save_fl on amd64 when optimized for size clobbers rdx if it were instrumented here.
115213 + if (is_leaf && !TREE_PUBLIC(current_function_decl) && DECL_DECLARED_INLINE_P(current_function_decl))
115214 + return 0;
115215 + if (is_leaf && !strncmp(IDENTIFIER_POINTER(DECL_NAME(current_function_decl)), "_paravirt_", 10))
115216 + return 0;
115217 +
115218 + // 4. insert track call at the beginning
115219 + if (!prologue_instrumented) {
115220 + gimple_stmt_iterator gsi;
115221 +
115222 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
115223 + if (dom_info_available_p(CDI_DOMINATORS))
115224 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
115225 + gsi = gsi_start_bb(bb);
115226 + stackleak_add_instrumentation(&gsi);
115227 + }
115228 +
115229 + return 0;
115230 +}
115231 +
115232 +static unsigned int execute_stackleak_final(void)
115233 +{
115234 + rtx insn, next;
115235 +
115236 + if (cfun->calls_alloca)
115237 + return 0;
115238 +
115239 + // keep calls only if function frame is big enough
115240 + if (get_frame_size() >= track_frame_size)
115241 + return 0;
115242 +
115243 + // 1. find pax_track_stack calls
115244 + for (insn = get_insns(); insn; insn = next) {
115245 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
115246 + rtx body;
115247 +
115248 + next = NEXT_INSN(insn);
115249 + if (!CALL_P(insn))
115250 + continue;
115251 + body = PATTERN(insn);
115252 + if (GET_CODE(body) != CALL)
115253 + continue;
115254 + body = XEXP(body, 0);
115255 + if (GET_CODE(body) != MEM)
115256 + continue;
115257 + body = XEXP(body, 0);
115258 + if (GET_CODE(body) != SYMBOL_REF)
115259 + continue;
115260 + if (strcmp(XSTR(body, 0), track_function))
115261 + continue;
115262 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
115263 + // 2. delete call
115264 + delete_insn_and_edges(insn);
115265 +#if BUILDING_GCC_VERSION >= 4007
115266 + if (GET_CODE(next) == NOTE && NOTE_KIND(next) == NOTE_INSN_CALL_ARG_LOCATION) {
115267 + insn = next;
115268 + next = NEXT_INSN(insn);
115269 + delete_insn_and_edges(insn);
115270 + }
115271 +#endif
115272 + }
115273 +
115274 +// print_simple_rtl(stderr, get_insns());
115275 +// print_rtl(stderr, get_insns());
115276 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
115277 +
115278 + return 0;
115279 +}
115280 +
115281 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
115282 +{
115283 + const char * const plugin_name = plugin_info->base_name;
115284 + const int argc = plugin_info->argc;
115285 + const struct plugin_argument * const argv = plugin_info->argv;
115286 + int i;
115287 + struct register_pass_info stackleak_tree_instrument_pass_info = {
115288 + .pass = &stackleak_tree_instrument_pass.pass,
115289 +// .reference_pass_name = "tree_profile",
115290 + .reference_pass_name = "optimized",
115291 + .ref_pass_instance_number = 1,
115292 + .pos_op = PASS_POS_INSERT_BEFORE
115293 + };
115294 + struct register_pass_info stackleak_final_pass_info = {
115295 + .pass = &stackleak_final_rtl_opt_pass.pass,
115296 + .reference_pass_name = "final",
115297 + .ref_pass_instance_number = 1,
115298 + .pos_op = PASS_POS_INSERT_BEFORE
115299 + };
115300 +
115301 + if (!plugin_default_version_check(version, &gcc_version)) {
115302 + error(G_("incompatible gcc/plugin versions"));
115303 + return 1;
115304 + }
115305 +
115306 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
115307 +
115308 + for (i = 0; i < argc; ++i) {
115309 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
115310 + if (!argv[i].value) {
115311 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
115312 + continue;
115313 + }
115314 + track_frame_size = atoi(argv[i].value);
115315 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
115316 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
115317 + continue;
115318 + }
115319 + if (!strcmp(argv[i].key, "initialize-locals")) {
115320 + if (argv[i].value) {
115321 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
115322 + continue;
115323 + }
115324 + init_locals = true;
115325 + continue;
115326 + }
115327 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
115328 + }
115329 +
115330 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
115331 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
115332 +
115333 + return 0;
115334 +}
115335 diff --git a/tools/gcc/structleak_plugin.c b/tools/gcc/structleak_plugin.c
115336 new file mode 100644
115337 index 0000000..4fae911
115338 --- /dev/null
115339 +++ b/tools/gcc/structleak_plugin.c
115340 @@ -0,0 +1,277 @@
115341 +/*
115342 + * Copyright 2013 by PaX Team <pageexec@freemail.hu>
115343 + * Licensed under the GPL v2
115344 + *
115345 + * Note: the choice of the license means that the compilation process is
115346 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
115347 + * but for the kernel it doesn't matter since it doesn't link against
115348 + * any of the gcc libraries
115349 + *
115350 + * gcc plugin to forcibly initialize certain local variables that could
115351 + * otherwise leak kernel stack to userland if they aren't properly initialized
115352 + * by later code
115353 + *
115354 + * Homepage: http://pax.grsecurity.net/
115355 + *
115356 + * Usage:
115357 + * $ # for 4.5/4.6/C based 4.7
115358 + * $ gcc -I`gcc -print-file-name=plugin`/include -I`gcc -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o structleak_plugin.so structleak_plugin.c
115359 + * $ # for C++ based 4.7/4.8+
115360 + * $ g++ -I`g++ -print-file-name=plugin`/include -I`g++ -print-file-name=plugin`/include/c-family -fPIC -shared -O2 -o structleak_plugin.so structleak_plugin.c
115361 + * $ gcc -fplugin=./structleak_plugin.so test.c -O2
115362 + *
115363 + * TODO: eliminate redundant initializers
115364 + * increase type coverage
115365 + */
115366 +
115367 +#include "gcc-plugin.h"
115368 +#include "config.h"
115369 +#include "system.h"
115370 +#include "coretypes.h"
115371 +#include "tree.h"
115372 +#include "tree-pass.h"
115373 +#include "intl.h"
115374 +#include "plugin-version.h"
115375 +#include "tm.h"
115376 +#include "toplev.h"
115377 +#include "function.h"
115378 +#include "tree-flow.h"
115379 +#include "plugin.h"
115380 +#include "gimple.h"
115381 +#include "diagnostic.h"
115382 +#include "cfgloop.h"
115383 +#include "langhooks.h"
115384 +
115385 +#if BUILDING_GCC_VERSION >= 4008
115386 +#define TODO_dump_func 0
115387 +#endif
115388 +
115389 +#define NAME(node) IDENTIFIER_POINTER(DECL_NAME(node))
115390 +
115391 +// unused type flag in all versions 4.5-4.8
115392 +#define TYPE_USERSPACE(TYPE) TYPE_LANG_FLAG_3(TYPE)
115393 +
115394 +int plugin_is_GPL_compatible;
115395 +void debug_gimple_stmt(gimple gs);
115396 +
115397 +static struct plugin_info structleak_plugin_info = {
115398 + .version = "201304082245",
115399 + .help = "disable\tdo not activate plugin\n",
115400 +};
115401 +
115402 +static tree handle_user_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
115403 +{
115404 + *no_add_attrs = true;
115405 +
115406 + // check for types? for now accept everything linux has to offer
115407 + if (TREE_CODE(*node) != FIELD_DECL)
115408 + return NULL_TREE;
115409 +
115410 + *no_add_attrs = false;
115411 + return NULL_TREE;
115412 +}
115413 +
115414 +static struct attribute_spec user_attr = {
115415 + .name = "user",
115416 + .min_length = 0,
115417 + .max_length = 0,
115418 + .decl_required = false,
115419 + .type_required = false,
115420 + .function_type_required = false,
115421 + .handler = handle_user_attribute,
115422 +#if BUILDING_GCC_VERSION >= 4007
115423 + .affects_type_identity = true
115424 +#endif
115425 +};
115426 +
115427 +static void register_attributes(void *event_data, void *data)
115428 +{
115429 + register_attribute(&user_attr);
115430 +// register_attribute(&force_attr);
115431 +}
115432 +
115433 +static tree get_field_type(tree field)
115434 +{
115435 + return strip_array_types(TREE_TYPE(field));
115436 +}
115437 +
115438 +static bool is_userspace_type(tree type)
115439 +{
115440 + tree field;
115441 +
115442 + for (field = TYPE_FIELDS(type); field; field = TREE_CHAIN(field)) {
115443 + tree fieldtype = get_field_type(field);
115444 + enum tree_code code = TREE_CODE(fieldtype);
115445 +
115446 + if (code == RECORD_TYPE || code == UNION_TYPE)
115447 + if (is_userspace_type(fieldtype))
115448 + return true;
115449 +
115450 + if (lookup_attribute("user", DECL_ATTRIBUTES(field)))
115451 + return true;
115452 + }
115453 + return false;
115454 +}
115455 +
115456 +static void finish_type(void *event_data, void *data)
115457 +{
115458 + tree type = (tree)event_data;
115459 +
115460 + if (TYPE_USERSPACE(type))
115461 + return;
115462 +
115463 + if (is_userspace_type(type))
115464 + TYPE_USERSPACE(type) = 1;
115465 +}
115466 +
115467 +static void initialize(tree var)
115468 +{
115469 + basic_block bb;
115470 + gimple_stmt_iterator gsi;
115471 + tree initializer;
115472 + gimple init_stmt;
115473 +
115474 + // this is the original entry bb before the forced split
115475 + // TODO: check further BBs in case more splits occurred before us
115476 + bb = ENTRY_BLOCK_PTR->next_bb->next_bb;
115477 +
115478 + // first check if the variable is already initialized, warn otherwise
115479 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
115480 + gimple stmt = gsi_stmt(gsi);
115481 + tree rhs1;
115482 +
115483 + // we're looking for an assignment of a single rhs...
115484 + if (!gimple_assign_single_p(stmt))
115485 + continue;
115486 + rhs1 = gimple_assign_rhs1(stmt);
115487 +#if BUILDING_GCC_VERSION >= 4007
115488 + // ... of a non-clobbering expression...
115489 + if (TREE_CLOBBER_P(rhs1))
115490 + continue;
115491 +#endif
115492 + // ... to our variable...
115493 + if (gimple_get_lhs(stmt) != var)
115494 + continue;
115495 + // if it's an initializer then we're good
115496 + if (TREE_CODE(rhs1) == CONSTRUCTOR)
115497 + return;
115498 + }
115499 +
115500 + // these aren't the 0days you're looking for
115501 +// inform(DECL_SOURCE_LOCATION(var), "userspace variable will be forcibly initialized");
115502 +
115503 + // build the initializer expression
115504 + initializer = build_constructor(TREE_TYPE(var), NULL);
115505 +
115506 + // build the initializer stmt
115507 + init_stmt = gimple_build_assign(var, initializer);
115508 + gsi = gsi_start_bb(ENTRY_BLOCK_PTR->next_bb);
115509 + gsi_insert_before(&gsi, init_stmt, GSI_NEW_STMT);
115510 + update_stmt(init_stmt);
115511 +}
115512 +
115513 +static unsigned int handle_function(void)
115514 +{
115515 + basic_block bb;
115516 + unsigned int ret = 0;
115517 + tree var;
115518 +
115519 +#if BUILDING_GCC_VERSION == 4005
115520 + tree vars;
115521 +#else
115522 + unsigned int i;
115523 +#endif
115524 +
115525 + // split the first bb where we can put the forced initializers
115526 + bb = split_block_after_labels(ENTRY_BLOCK_PTR)->dest;
115527 + if (dom_info_available_p(CDI_DOMINATORS))
115528 + set_immediate_dominator(CDI_DOMINATORS, bb, ENTRY_BLOCK_PTR);
115529 +
115530 + // enumerate all local variables and forcibly initialize our targets
115531 +#if BUILDING_GCC_VERSION == 4005
115532 + for (vars = cfun->local_decls; vars; vars = TREE_CHAIN(vars)) {
115533 + var = TREE_VALUE(vars);
115534 +#else
115535 + FOR_EACH_LOCAL_DECL(cfun, i, var) {
115536 +#endif
115537 + tree type = TREE_TYPE(var);
115538 +
115539 + gcc_assert(DECL_P(var));
115540 + if (!auto_var_in_fn_p(var, current_function_decl))
115541 + continue;
115542 +
115543 + // only care about structure types
115544 + if (TREE_CODE(type) != RECORD_TYPE && TREE_CODE(type) != UNION_TYPE)
115545 + continue;
115546 +
115547 + // if the type is of interest, examine the variable
115548 + if (TYPE_USERSPACE(type))
115549 + initialize(var);
115550 + }
115551 +
115552 + return ret;
115553 +}
115554 +
115555 +static struct gimple_opt_pass structleak_pass = {
115556 + .pass = {
115557 + .type = GIMPLE_PASS,
115558 + .name = "structleak",
115559 +#if BUILDING_GCC_VERSION >= 4008
115560 + .optinfo_flags = OPTGROUP_NONE,
115561 +#endif
115562 + .gate = NULL,
115563 + .execute = handle_function,
115564 + .sub = NULL,
115565 + .next = NULL,
115566 + .static_pass_number = 0,
115567 + .tv_id = TV_NONE,
115568 + .properties_required = PROP_cfg,
115569 + .properties_provided = 0,
115570 + .properties_destroyed = 0,
115571 + .todo_flags_start = 0,
115572 + .todo_flags_finish = TODO_verify_ssa | TODO_verify_stmts | TODO_dump_func | TODO_remove_unused_locals | TODO_update_ssa | TODO_ggc_collect | TODO_verify_flow
115573 + }
115574 +};
115575 +
115576 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
115577 +{
115578 + int i;
115579 + const char * const plugin_name = plugin_info->base_name;
115580 + const int argc = plugin_info->argc;
115581 + const struct plugin_argument * const argv = plugin_info->argv;
115582 + bool enable = true;
115583 +
115584 + struct register_pass_info structleak_pass_info = {
115585 + .pass = &structleak_pass.pass,
115586 + .reference_pass_name = "ssa",
115587 + .ref_pass_instance_number = 1,
115588 + .pos_op = PASS_POS_INSERT_AFTER
115589 + };
115590 +
115591 + if (!plugin_default_version_check(version, &gcc_version)) {
115592 + error(G_("incompatible gcc/plugin versions"));
115593 + return 1;
115594 + }
115595 +
115596 + if (strcmp(lang_hooks.name, "GNU C")) {
115597 + inform(UNKNOWN_LOCATION, G_("%s supports C only"), plugin_name);
115598 + enable = false;
115599 + }
115600 +
115601 + for (i = 0; i < argc; ++i) {
115602 + if (!strcmp(argv[i].key, "disable")) {
115603 + enable = false;
115604 + continue;
115605 + }
115606 + error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
115607 + }
115608 +
115609 + register_callback(plugin_name, PLUGIN_INFO, NULL, &structleak_plugin_info);
115610 + if (enable) {
115611 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &structleak_pass_info);
115612 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
115613 + }
115614 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
115615 +
115616 + return 0;
115617 +}
115618 diff --git a/tools/lib/lk/Makefile b/tools/lib/lk/Makefile
115619 index 3dba0a4..97175dc 100644
115620 --- a/tools/lib/lk/Makefile
115621 +++ b/tools/lib/lk/Makefile
115622 @@ -13,7 +13,7 @@ LIB_OBJS += $(OUTPUT)debugfs.o
115623
115624 LIBFILE = liblk.a
115625
115626 -CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC
115627 +CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) -fPIC
115628 EXTLIBS = -lelf -lpthread -lrt -lm
115629 ALL_CFLAGS = $(CFLAGS) $(BASIC_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
115630 ALL_LDFLAGS = $(LDFLAGS)
115631 diff --git a/tools/perf/util/include/asm/alternative-asm.h b/tools/perf/util/include/asm/alternative-asm.h
115632 index 6789d78..4afd019e 100644
115633 --- a/tools/perf/util/include/asm/alternative-asm.h
115634 +++ b/tools/perf/util/include/asm/alternative-asm.h
115635 @@ -5,4 +5,7 @@
115636
115637 #define altinstruction_entry #
115638
115639 + .macro pax_force_retaddr rip=0, reload=0
115640 + .endm
115641 +
115642 #endif
115643 diff --git a/tools/perf/util/include/linux/compiler.h b/tools/perf/util/include/linux/compiler.h
115644 index 96b919d..c49bb74 100644
115645 --- a/tools/perf/util/include/linux/compiler.h
115646 +++ b/tools/perf/util/include/linux/compiler.h
115647 @@ -18,4 +18,12 @@
115648 #define __force
115649 #endif
115650
115651 +#ifndef __size_overflow
115652 +# define __size_overflow(...)
115653 +#endif
115654 +
115655 +#ifndef __intentional_overflow
115656 +# define __intentional_overflow(...)
115657 +#endif
115658 +
115659 #endif
115660 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
115661 index aac732d..bc87a5d 100644
115662 --- a/virt/kvm/kvm_main.c
115663 +++ b/virt/kvm/kvm_main.c
115664 @@ -75,12 +75,17 @@ LIST_HEAD(vm_list);
115665
115666 static cpumask_var_t cpus_hardware_enabled;
115667 static int kvm_usage_count = 0;
115668 -static atomic_t hardware_enable_failed;
115669 +static atomic_unchecked_t hardware_enable_failed;
115670
115671 struct kmem_cache *kvm_vcpu_cache;
115672 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
115673
115674 -static __read_mostly struct preempt_ops kvm_preempt_ops;
115675 +static void kvm_sched_in(struct preempt_notifier *pn, int cpu);
115676 +static void kvm_sched_out(struct preempt_notifier *pn, struct task_struct *next);
115677 +static struct preempt_ops kvm_preempt_ops = {
115678 + .sched_in = kvm_sched_in,
115679 + .sched_out = kvm_sched_out,
115680 +};
115681
115682 struct dentry *kvm_debugfs_dir;
115683
115684 @@ -749,7 +754,7 @@ int __kvm_set_memory_region(struct kvm *kvm,
115685 /* We can read the guest memory with __xxx_user() later on. */
115686 if ((mem->slot < KVM_USER_MEM_SLOTS) &&
115687 ((mem->userspace_addr & (PAGE_SIZE - 1)) ||
115688 - !access_ok(VERIFY_WRITE,
115689 + !__access_ok(VERIFY_WRITE,
115690 (void __user *)(unsigned long)mem->userspace_addr,
115691 mem->memory_size)))
115692 goto out;
115693 @@ -1613,8 +1618,17 @@ EXPORT_SYMBOL_GPL(kvm_read_guest_cached);
115694
115695 int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
115696 {
115697 - return kvm_write_guest_page(kvm, gfn, (const void *) empty_zero_page,
115698 - offset, len);
115699 + int r;
115700 + unsigned long addr;
115701 +
115702 + addr = gfn_to_hva(kvm, gfn);
115703 + if (kvm_is_error_hva(addr))
115704 + return -EFAULT;
115705 + r = __clear_user((void __user *)addr + offset, len);
115706 + if (r)
115707 + return -EFAULT;
115708 + mark_page_dirty(kvm, gfn);
115709 + return 0;
115710 }
115711 EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
115712
115713 @@ -1867,7 +1881,7 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
115714 return 0;
115715 }
115716
115717 -static struct file_operations kvm_vcpu_fops = {
115718 +static file_operations_no_const kvm_vcpu_fops __read_only = {
115719 .release = kvm_vcpu_release,
115720 .unlocked_ioctl = kvm_vcpu_ioctl,
115721 #ifdef CONFIG_COMPAT
115722 @@ -2553,7 +2567,7 @@ static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
115723 return 0;
115724 }
115725
115726 -static struct file_operations kvm_vm_fops = {
115727 +static file_operations_no_const kvm_vm_fops __read_only = {
115728 .release = kvm_vm_release,
115729 .unlocked_ioctl = kvm_vm_ioctl,
115730 #ifdef CONFIG_COMPAT
115731 @@ -2654,7 +2668,7 @@ out:
115732 return r;
115733 }
115734
115735 -static struct file_operations kvm_chardev_ops = {
115736 +static file_operations_no_const kvm_chardev_ops __read_only = {
115737 .unlocked_ioctl = kvm_dev_ioctl,
115738 .compat_ioctl = kvm_dev_ioctl,
115739 .llseek = noop_llseek,
115740 @@ -2680,7 +2694,7 @@ static void hardware_enable_nolock(void *junk)
115741
115742 if (r) {
115743 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
115744 - atomic_inc(&hardware_enable_failed);
115745 + atomic_inc_unchecked(&hardware_enable_failed);
115746 printk(KERN_INFO "kvm: enabling virtualization on "
115747 "CPU%d failed\n", cpu);
115748 }
115749 @@ -2734,10 +2748,10 @@ static int hardware_enable_all(void)
115750
115751 kvm_usage_count++;
115752 if (kvm_usage_count == 1) {
115753 - atomic_set(&hardware_enable_failed, 0);
115754 + atomic_set_unchecked(&hardware_enable_failed, 0);
115755 on_each_cpu(hardware_enable_nolock, NULL, 1);
115756
115757 - if (atomic_read(&hardware_enable_failed)) {
115758 + if (atomic_read_unchecked(&hardware_enable_failed)) {
115759 hardware_disable_all_nolock();
115760 r = -EBUSY;
115761 }
115762 @@ -3171,7 +3185,7 @@ static void kvm_sched_out(struct preempt_notifier *pn,
115763 kvm_arch_vcpu_put(vcpu);
115764 }
115765
115766 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
115767 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
115768 struct module *module)
115769 {
115770 int r;
115771 @@ -3218,7 +3232,7 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
115772 if (!vcpu_align)
115773 vcpu_align = __alignof__(struct kvm_vcpu);
115774 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
115775 - 0, NULL);
115776 + SLAB_USERCOPY, NULL);
115777 if (!kvm_vcpu_cache) {
115778 r = -ENOMEM;
115779 goto out_free_3;
115780 @@ -3228,9 +3242,11 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
115781 if (r)
115782 goto out_free;
115783
115784 + pax_open_kernel();
115785 kvm_chardev_ops.owner = module;
115786 kvm_vm_fops.owner = module;
115787 kvm_vcpu_fops.owner = module;
115788 + pax_close_kernel();
115789
115790 r = misc_register(&kvm_dev);
115791 if (r) {
115792 @@ -3240,9 +3256,6 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
115793
115794 register_syscore_ops(&kvm_syscore_ops);
115795
115796 - kvm_preempt_ops.sched_in = kvm_sched_in;
115797 - kvm_preempt_ops.sched_out = kvm_sched_out;
115798 -
115799 r = kvm_init_debug();
115800 if (r) {
115801 printk(KERN_ERR "kvm: create debugfs files failed\n");